code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def tarjan(graph):
"""Strongly connected components by Tarjan, iterative implementation
:param graph: directed graph in listlist format, cannot be listdict
:returns: list of lists for each component
:complexity: linear
"""
n = len(graph)
dfs_num = [None] * n
dfs_min = [n] * n
waiting = []
waits = [False] * n # invariant: waits[v] iff v in waiting
sccp = [] # list of detected components
dfs_time = 0
times_seen = [-1] * n
for start in range(n):
if times_seen[start] == -1: # initiate path
times_seen[start] = 0
to_visit = [start]
while to_visit:
node = to_visit[-1] # top of stack
if times_seen[node] == 0: # start process
dfs_num[node] = dfs_time
dfs_min[node] = dfs_time
dfs_time += 1
waiting.append(node)
waits[node] = True
children = graph[node]
if times_seen[node] == len(children): # end of process
to_visit.pop() # remove from stack
dfs_min[node] = dfs_num[node] # compute dfs_min
for child in children:
if waits[child] and dfs_min[child] < dfs_min[node]:
dfs_min[node] = dfs_min[child]
if dfs_min[node] == dfs_num[node]: # representative
component = [] # make component
while True: # add nodes
u = waiting.pop()
waits[u] = False
component.append(u)
if u == node: # until repr.
break
sccp.append(component)
else:
child = children[times_seen[node]]
times_seen[node] += 1
if times_seen[child] == -1: # not visited yet
times_seen[child] = 0
to_visit.append(child)
return sccp | def function[tarjan, parameter[graph]]:
constant[Strongly connected components by Tarjan, iterative implementation
:param graph: directed graph in listlist format, cannot be listdict
:returns: list of lists for each component
:complexity: linear
]
variable[n] assign[=] call[name[len], parameter[name[graph]]]
variable[dfs_num] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18bcc9150>]] * name[n]]
variable[dfs_min] assign[=] binary_operation[list[[<ast.Name object at 0x7da18bcc9bd0>]] * name[n]]
variable[waiting] assign[=] list[[]]
variable[waits] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18bccbf40>]] * name[n]]
variable[sccp] assign[=] list[[]]
variable[dfs_time] assign[=] constant[0]
variable[times_seen] assign[=] binary_operation[list[[<ast.UnaryOp object at 0x7da18bcc9900>]] * name[n]]
for taget[name[start]] in starred[call[name[range], parameter[name[n]]]] begin[:]
if compare[call[name[times_seen]][name[start]] equal[==] <ast.UnaryOp object at 0x7da18bcc8460>] begin[:]
call[name[times_seen]][name[start]] assign[=] constant[0]
variable[to_visit] assign[=] list[[<ast.Name object at 0x7da18bcc8d00>]]
while name[to_visit] begin[:]
variable[node] assign[=] call[name[to_visit]][<ast.UnaryOp object at 0x7da18bccb580>]
if compare[call[name[times_seen]][name[node]] equal[==] constant[0]] begin[:]
call[name[dfs_num]][name[node]] assign[=] name[dfs_time]
call[name[dfs_min]][name[node]] assign[=] name[dfs_time]
<ast.AugAssign object at 0x7da18bccbb80>
call[name[waiting].append, parameter[name[node]]]
call[name[waits]][name[node]] assign[=] constant[True]
variable[children] assign[=] call[name[graph]][name[node]]
if compare[call[name[times_seen]][name[node]] equal[==] call[name[len], parameter[name[children]]]] begin[:]
call[name[to_visit].pop, parameter[]]
call[name[dfs_min]][name[node]] assign[=] call[name[dfs_num]][name[node]]
for taget[name[child]] in starred[name[children]] begin[:]
if <ast.BoolOp object at 0x7da18bccbeb0> begin[:]
call[name[dfs_min]][name[node]] assign[=] call[name[dfs_min]][name[child]]
if compare[call[name[dfs_min]][name[node]] equal[==] call[name[dfs_num]][name[node]]] begin[:]
variable[component] assign[=] list[[]]
while constant[True] begin[:]
variable[u] assign[=] call[name[waiting].pop, parameter[]]
call[name[waits]][name[u]] assign[=] constant[False]
call[name[component].append, parameter[name[u]]]
if compare[name[u] equal[==] name[node]] begin[:]
break
call[name[sccp].append, parameter[name[component]]]
return[name[sccp]] | keyword[def] identifier[tarjan] ( identifier[graph] ):
literal[string]
identifier[n] = identifier[len] ( identifier[graph] )
identifier[dfs_num] =[ keyword[None] ]* identifier[n]
identifier[dfs_min] =[ identifier[n] ]* identifier[n]
identifier[waiting] =[]
identifier[waits] =[ keyword[False] ]* identifier[n]
identifier[sccp] =[]
identifier[dfs_time] = literal[int]
identifier[times_seen] =[- literal[int] ]* identifier[n]
keyword[for] identifier[start] keyword[in] identifier[range] ( identifier[n] ):
keyword[if] identifier[times_seen] [ identifier[start] ]==- literal[int] :
identifier[times_seen] [ identifier[start] ]= literal[int]
identifier[to_visit] =[ identifier[start] ]
keyword[while] identifier[to_visit] :
identifier[node] = identifier[to_visit] [- literal[int] ]
keyword[if] identifier[times_seen] [ identifier[node] ]== literal[int] :
identifier[dfs_num] [ identifier[node] ]= identifier[dfs_time]
identifier[dfs_min] [ identifier[node] ]= identifier[dfs_time]
identifier[dfs_time] += literal[int]
identifier[waiting] . identifier[append] ( identifier[node] )
identifier[waits] [ identifier[node] ]= keyword[True]
identifier[children] = identifier[graph] [ identifier[node] ]
keyword[if] identifier[times_seen] [ identifier[node] ]== identifier[len] ( identifier[children] ):
identifier[to_visit] . identifier[pop] ()
identifier[dfs_min] [ identifier[node] ]= identifier[dfs_num] [ identifier[node] ]
keyword[for] identifier[child] keyword[in] identifier[children] :
keyword[if] identifier[waits] [ identifier[child] ] keyword[and] identifier[dfs_min] [ identifier[child] ]< identifier[dfs_min] [ identifier[node] ]:
identifier[dfs_min] [ identifier[node] ]= identifier[dfs_min] [ identifier[child] ]
keyword[if] identifier[dfs_min] [ identifier[node] ]== identifier[dfs_num] [ identifier[node] ]:
identifier[component] =[]
keyword[while] keyword[True] :
identifier[u] = identifier[waiting] . identifier[pop] ()
identifier[waits] [ identifier[u] ]= keyword[False]
identifier[component] . identifier[append] ( identifier[u] )
keyword[if] identifier[u] == identifier[node] :
keyword[break]
identifier[sccp] . identifier[append] ( identifier[component] )
keyword[else] :
identifier[child] = identifier[children] [ identifier[times_seen] [ identifier[node] ]]
identifier[times_seen] [ identifier[node] ]+= literal[int]
keyword[if] identifier[times_seen] [ identifier[child] ]==- literal[int] :
identifier[times_seen] [ identifier[child] ]= literal[int]
identifier[to_visit] . identifier[append] ( identifier[child] )
keyword[return] identifier[sccp] | def tarjan(graph):
"""Strongly connected components by Tarjan, iterative implementation
:param graph: directed graph in listlist format, cannot be listdict
:returns: list of lists for each component
:complexity: linear
"""
n = len(graph)
dfs_num = [None] * n
dfs_min = [n] * n
waiting = []
waits = [False] * n # invariant: waits[v] iff v in waiting
sccp = [] # list of detected components
dfs_time = 0
times_seen = [-1] * n
for start in range(n):
if times_seen[start] == -1: # initiate path
times_seen[start] = 0
to_visit = [start]
while to_visit:
node = to_visit[-1] # top of stack
if times_seen[node] == 0: # start process
dfs_num[node] = dfs_time
dfs_min[node] = dfs_time
dfs_time += 1
waiting.append(node)
waits[node] = True # depends on [control=['if'], data=[]]
children = graph[node]
if times_seen[node] == len(children): # end of process
to_visit.pop() # remove from stack
dfs_min[node] = dfs_num[node] # compute dfs_min
for child in children:
if waits[child] and dfs_min[child] < dfs_min[node]:
dfs_min[node] = dfs_min[child] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
if dfs_min[node] == dfs_num[node]: # representative
component = [] # make component
while True: # add nodes
u = waiting.pop()
waits[u] = False
component.append(u)
if u == node: # until repr.
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
sccp.append(component) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
child = children[times_seen[node]]
times_seen[node] += 1
if times_seen[child] == -1: # not visited yet
times_seen[child] = 0
to_visit.append(child) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['start']]
return sccp |
def loadMatlabImages(self, path, name):
"""
Loads images from a .mat file.
:param path: (string) Path to .mat file
:param name: (string) Object name in the .mat file, just before .mat
Also stores image dimensions to later the original images. If there are
multiple channels, self.numChannels will store the number of channels,
otherwise it will be set to None.
"""
try:
images = sc.loadmat(path)[name]
except KeyError:
raise KeyError('Wrong filename for provided images.')
self._initializeDimensions(images)
return images | def function[loadMatlabImages, parameter[self, path, name]]:
constant[
Loads images from a .mat file.
:param path: (string) Path to .mat file
:param name: (string) Object name in the .mat file, just before .mat
Also stores image dimensions to later the original images. If there are
multiple channels, self.numChannels will store the number of channels,
otherwise it will be set to None.
]
<ast.Try object at 0x7da1b0855e70>
call[name[self]._initializeDimensions, parameter[name[images]]]
return[name[images]] | keyword[def] identifier[loadMatlabImages] ( identifier[self] , identifier[path] , identifier[name] ):
literal[string]
keyword[try] :
identifier[images] = identifier[sc] . identifier[loadmat] ( identifier[path] )[ identifier[name] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[KeyError] ( literal[string] )
identifier[self] . identifier[_initializeDimensions] ( identifier[images] )
keyword[return] identifier[images] | def loadMatlabImages(self, path, name):
"""
Loads images from a .mat file.
:param path: (string) Path to .mat file
:param name: (string) Object name in the .mat file, just before .mat
Also stores image dimensions to later the original images. If there are
multiple channels, self.numChannels will store the number of channels,
otherwise it will be set to None.
"""
try:
images = sc.loadmat(path)[name] # depends on [control=['try'], data=[]]
except KeyError:
raise KeyError('Wrong filename for provided images.') # depends on [control=['except'], data=[]]
self._initializeDimensions(images)
return images |
def liveLogin(self, user, pwd):
"""
Obtain connection parameters from the Microsoft account login page, and perform a login with the given email
address or Skype username, and its password. This emulates a login to Skype for Web on ``login.live.com``.
.. note::
Microsoft accounts with two-factor authentication enabled are not supported, and will cause a
:class:`.SkypeAuthException` to be raised. See the exception definitions for other possible causes.
Args:
user (str): username or email address of the connecting account
pwd (str): password of the connecting account
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed
"""
self.tokens["skype"], self.tokenExpiry["skype"] = SkypeLiveAuthProvider(self).auth(user, pwd)
self.getUserId()
self.getRegToken() | def function[liveLogin, parameter[self, user, pwd]]:
constant[
Obtain connection parameters from the Microsoft account login page, and perform a login with the given email
address or Skype username, and its password. This emulates a login to Skype for Web on ``login.live.com``.
.. note::
Microsoft accounts with two-factor authentication enabled are not supported, and will cause a
:class:`.SkypeAuthException` to be raised. See the exception definitions for other possible causes.
Args:
user (str): username or email address of the connecting account
pwd (str): password of the connecting account
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed
]
<ast.Tuple object at 0x7da18ede74c0> assign[=] call[call[name[SkypeLiveAuthProvider], parameter[name[self]]].auth, parameter[name[user], name[pwd]]]
call[name[self].getUserId, parameter[]]
call[name[self].getRegToken, parameter[]] | keyword[def] identifier[liveLogin] ( identifier[self] , identifier[user] , identifier[pwd] ):
literal[string]
identifier[self] . identifier[tokens] [ literal[string] ], identifier[self] . identifier[tokenExpiry] [ literal[string] ]= identifier[SkypeLiveAuthProvider] ( identifier[self] ). identifier[auth] ( identifier[user] , identifier[pwd] )
identifier[self] . identifier[getUserId] ()
identifier[self] . identifier[getRegToken] () | def liveLogin(self, user, pwd):
"""
Obtain connection parameters from the Microsoft account login page, and perform a login with the given email
address or Skype username, and its password. This emulates a login to Skype for Web on ``login.live.com``.
.. note::
Microsoft accounts with two-factor authentication enabled are not supported, and will cause a
:class:`.SkypeAuthException` to be raised. See the exception definitions for other possible causes.
Args:
user (str): username or email address of the connecting account
pwd (str): password of the connecting account
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed
"""
(self.tokens['skype'], self.tokenExpiry['skype']) = SkypeLiveAuthProvider(self).auth(user, pwd)
self.getUserId()
self.getRegToken() |
def _bump_up_time_by_microsecond(time):
"""
Helper function bumping up the given datetime.time by a microsecond,
cycling around silently to 00:00:00.0 in case of an overflow.
@param time: Time object.
@type time: B{datetime}.I{time}
@return: Time object.
@rtype: B{datetime}.I{time}
"""
dt = datetime.datetime(2000, 1, 1, time.hour, time.minute,
time.second, time.microsecond)
dt += datetime.timedelta(microseconds=1)
return dt.time() | def function[_bump_up_time_by_microsecond, parameter[time]]:
constant[
Helper function bumping up the given datetime.time by a microsecond,
cycling around silently to 00:00:00.0 in case of an overflow.
@param time: Time object.
@type time: B{datetime}.I{time}
@return: Time object.
@rtype: B{datetime}.I{time}
]
variable[dt] assign[=] call[name[datetime].datetime, parameter[constant[2000], constant[1], constant[1], name[time].hour, name[time].minute, name[time].second, name[time].microsecond]]
<ast.AugAssign object at 0x7da18f09ef80>
return[call[name[dt].time, parameter[]]] | keyword[def] identifier[_bump_up_time_by_microsecond] ( identifier[time] ):
literal[string]
identifier[dt] = identifier[datetime] . identifier[datetime] ( literal[int] , literal[int] , literal[int] , identifier[time] . identifier[hour] , identifier[time] . identifier[minute] ,
identifier[time] . identifier[second] , identifier[time] . identifier[microsecond] )
identifier[dt] += identifier[datetime] . identifier[timedelta] ( identifier[microseconds] = literal[int] )
keyword[return] identifier[dt] . identifier[time] () | def _bump_up_time_by_microsecond(time):
"""
Helper function bumping up the given datetime.time by a microsecond,
cycling around silently to 00:00:00.0 in case of an overflow.
@param time: Time object.
@type time: B{datetime}.I{time}
@return: Time object.
@rtype: B{datetime}.I{time}
"""
dt = datetime.datetime(2000, 1, 1, time.hour, time.minute, time.second, time.microsecond)
dt += datetime.timedelta(microseconds=1)
return dt.time() |
def set_output(self, output):
"""
Set the output path for the job. Optional if the runner has been
instantiated with a prefix.
"""
self.output = output
self.logger.info("assigning output to %s", self.output) | def function[set_output, parameter[self, output]]:
constant[
Set the output path for the job. Optional if the runner has been
instantiated with a prefix.
]
name[self].output assign[=] name[output]
call[name[self].logger.info, parameter[constant[assigning output to %s], name[self].output]] | keyword[def] identifier[set_output] ( identifier[self] , identifier[output] ):
literal[string]
identifier[self] . identifier[output] = identifier[output]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[output] ) | def set_output(self, output):
"""
Set the output path for the job. Optional if the runner has been
instantiated with a prefix.
"""
self.output = output
self.logger.info('assigning output to %s', self.output) |
def union_fill_gap(self, i):
'''Like union, but ignores whether the two intervals intersect or not'''
return Interval(min(self.start, i.start), max(self.end, i.end)) | def function[union_fill_gap, parameter[self, i]]:
constant[Like union, but ignores whether the two intervals intersect or not]
return[call[name[Interval], parameter[call[name[min], parameter[name[self].start, name[i].start]], call[name[max], parameter[name[self].end, name[i].end]]]]] | keyword[def] identifier[union_fill_gap] ( identifier[self] , identifier[i] ):
literal[string]
keyword[return] identifier[Interval] ( identifier[min] ( identifier[self] . identifier[start] , identifier[i] . identifier[start] ), identifier[max] ( identifier[self] . identifier[end] , identifier[i] . identifier[end] )) | def union_fill_gap(self, i):
"""Like union, but ignores whether the two intervals intersect or not"""
return Interval(min(self.start, i.start), max(self.end, i.end)) |
def render_item(self, contentitem):
"""
Render the item - but render as search text instead.
"""
plugin = contentitem.plugin
if not plugin.search_output and not plugin.search_fields:
# Only render items when the item was output will be indexed.
raise SkipItem
if not plugin.search_output:
output = ContentItemOutput('', cacheable=False)
else:
output = super(SearchRenderingPipe, self).render_item(contentitem)
if plugin.search_fields:
# Just add the results into the output, but avoid caching that somewhere.
output.html += plugin.get_search_text(contentitem)
output.cacheable = False
return output | def function[render_item, parameter[self, contentitem]]:
constant[
Render the item - but render as search text instead.
]
variable[plugin] assign[=] name[contentitem].plugin
if <ast.BoolOp object at 0x7da1b11c3df0> begin[:]
<ast.Raise object at 0x7da1b11c13f0>
if <ast.UnaryOp object at 0x7da1b11c1fc0> begin[:]
variable[output] assign[=] call[name[ContentItemOutput], parameter[constant[]]]
if name[plugin].search_fields begin[:]
<ast.AugAssign object at 0x7da1b11c2ce0>
name[output].cacheable assign[=] constant[False]
return[name[output]] | keyword[def] identifier[render_item] ( identifier[self] , identifier[contentitem] ):
literal[string]
identifier[plugin] = identifier[contentitem] . identifier[plugin]
keyword[if] keyword[not] identifier[plugin] . identifier[search_output] keyword[and] keyword[not] identifier[plugin] . identifier[search_fields] :
keyword[raise] identifier[SkipItem]
keyword[if] keyword[not] identifier[plugin] . identifier[search_output] :
identifier[output] = identifier[ContentItemOutput] ( literal[string] , identifier[cacheable] = keyword[False] )
keyword[else] :
identifier[output] = identifier[super] ( identifier[SearchRenderingPipe] , identifier[self] ). identifier[render_item] ( identifier[contentitem] )
keyword[if] identifier[plugin] . identifier[search_fields] :
identifier[output] . identifier[html] += identifier[plugin] . identifier[get_search_text] ( identifier[contentitem] )
identifier[output] . identifier[cacheable] = keyword[False]
keyword[return] identifier[output] | def render_item(self, contentitem):
"""
Render the item - but render as search text instead.
"""
plugin = contentitem.plugin
if not plugin.search_output and (not plugin.search_fields):
# Only render items when the item was output will be indexed.
raise SkipItem # depends on [control=['if'], data=[]]
if not plugin.search_output:
output = ContentItemOutput('', cacheable=False) # depends on [control=['if'], data=[]]
else:
output = super(SearchRenderingPipe, self).render_item(contentitem)
if plugin.search_fields:
# Just add the results into the output, but avoid caching that somewhere.
output.html += plugin.get_search_text(contentitem)
output.cacheable = False # depends on [control=['if'], data=[]]
return output |
def TaubinSVD(XY):
"""
algebraic circle fit
input: list [[x_1, y_1], [x_2, y_2], ....]
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
Algebraic circle fit by Taubin
G. Taubin, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
"""
XY = numpy.array(XY)
X = XY[:,0] - numpy.mean(XY[:,0]) # norming points by x avg
Y = XY[:,1] - numpy.mean(XY[:,1]) # norming points by y avg
centroid = [numpy.mean(XY[:,0]), numpy.mean(XY[:,1])]
Z = X * X + Y * Y
Zmean = numpy.mean(Z)
Z0 = old_div((Z - Zmean), (2. * numpy.sqrt(Zmean)))
ZXY = numpy.array([Z0, X, Y]).T
U, S, V = numpy.linalg.svd(ZXY, full_matrices=False) #
V = V.transpose()
A = V[:,2]
A[0] = old_div(A[0], (2. * numpy.sqrt(Zmean)))
A = numpy.concatenate([A, [(-1. * Zmean * A[0])]], axis=0)
a, b = (-1 * A[1:3]) / A[0] / 2 + centroid
r = numpy.sqrt(A[1]*A[1]+A[2]*A[2]-4*A[0]*A[3])/abs(A[0])/2;
return a,b,r | def function[TaubinSVD, parameter[XY]]:
constant[
algebraic circle fit
input: list [[x_1, y_1], [x_2, y_2], ....]
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
Algebraic circle fit by Taubin
G. Taubin, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
]
variable[XY] assign[=] call[name[numpy].array, parameter[name[XY]]]
variable[X] assign[=] binary_operation[call[name[XY]][tuple[[<ast.Slice object at 0x7da1b04d52d0>, <ast.Constant object at 0x7da1b04d6f80>]]] - call[name[numpy].mean, parameter[call[name[XY]][tuple[[<ast.Slice object at 0x7da1b04d5bd0>, <ast.Constant object at 0x7da1b04d7370>]]]]]]
variable[Y] assign[=] binary_operation[call[name[XY]][tuple[[<ast.Slice object at 0x7da1b04d7a60>, <ast.Constant object at 0x7da1b04d48e0>]]] - call[name[numpy].mean, parameter[call[name[XY]][tuple[[<ast.Slice object at 0x7da1b04d6410>, <ast.Constant object at 0x7da1b04d6230>]]]]]]
variable[centroid] assign[=] list[[<ast.Call object at 0x7da1b04d5510>, <ast.Call object at 0x7da1b04d4cd0>]]
variable[Z] assign[=] binary_operation[binary_operation[name[X] * name[X]] + binary_operation[name[Y] * name[Y]]]
variable[Zmean] assign[=] call[name[numpy].mean, parameter[name[Z]]]
variable[Z0] assign[=] call[name[old_div], parameter[binary_operation[name[Z] - name[Zmean]], binary_operation[constant[2.0] * call[name[numpy].sqrt, parameter[name[Zmean]]]]]]
variable[ZXY] assign[=] call[name[numpy].array, parameter[list[[<ast.Name object at 0x7da1b04d5a80>, <ast.Name object at 0x7da1b04d6140>, <ast.Name object at 0x7da1b04d4280>]]]].T
<ast.Tuple object at 0x7da1b04d5750> assign[=] call[name[numpy].linalg.svd, parameter[name[ZXY]]]
variable[V] assign[=] call[name[V].transpose, parameter[]]
variable[A] assign[=] call[name[V]][tuple[[<ast.Slice object at 0x7da1b04d6c50>, <ast.Constant object at 0x7da1b04d79d0>]]]
call[name[A]][constant[0]] assign[=] call[name[old_div], parameter[call[name[A]][constant[0]], binary_operation[constant[2.0] * call[name[numpy].sqrt, parameter[name[Zmean]]]]]]
variable[A] assign[=] call[name[numpy].concatenate, parameter[list[[<ast.Name object at 0x7da1b04d77c0>, <ast.List object at 0x7da1b04d6b60>]]]]
<ast.Tuple object at 0x7da1b04d7c70> assign[=] binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b04d4d60> * call[name[A]][<ast.Slice object at 0x7da1b04d79a0>]] / call[name[A]][constant[0]]] / constant[2]] + name[centroid]]
variable[r] assign[=] binary_operation[binary_operation[call[name[numpy].sqrt, parameter[binary_operation[binary_operation[binary_operation[call[name[A]][constant[1]] * call[name[A]][constant[1]]] + binary_operation[call[name[A]][constant[2]] * call[name[A]][constant[2]]]] - binary_operation[binary_operation[constant[4] * call[name[A]][constant[0]]] * call[name[A]][constant[3]]]]]] / call[name[abs], parameter[call[name[A]][constant[0]]]]] / constant[2]]
return[tuple[[<ast.Name object at 0x7da1b049eb00>, <ast.Name object at 0x7da1b049ebc0>, <ast.Name object at 0x7da1b049edd0>]]] | keyword[def] identifier[TaubinSVD] ( identifier[XY] ):
literal[string]
identifier[XY] = identifier[numpy] . identifier[array] ( identifier[XY] )
identifier[X] = identifier[XY] [:, literal[int] ]- identifier[numpy] . identifier[mean] ( identifier[XY] [:, literal[int] ])
identifier[Y] = identifier[XY] [:, literal[int] ]- identifier[numpy] . identifier[mean] ( identifier[XY] [:, literal[int] ])
identifier[centroid] =[ identifier[numpy] . identifier[mean] ( identifier[XY] [:, literal[int] ]), identifier[numpy] . identifier[mean] ( identifier[XY] [:, literal[int] ])]
identifier[Z] = identifier[X] * identifier[X] + identifier[Y] * identifier[Y]
identifier[Zmean] = identifier[numpy] . identifier[mean] ( identifier[Z] )
identifier[Z0] = identifier[old_div] (( identifier[Z] - identifier[Zmean] ),( literal[int] * identifier[numpy] . identifier[sqrt] ( identifier[Zmean] )))
identifier[ZXY] = identifier[numpy] . identifier[array] ([ identifier[Z0] , identifier[X] , identifier[Y] ]). identifier[T]
identifier[U] , identifier[S] , identifier[V] = identifier[numpy] . identifier[linalg] . identifier[svd] ( identifier[ZXY] , identifier[full_matrices] = keyword[False] )
identifier[V] = identifier[V] . identifier[transpose] ()
identifier[A] = identifier[V] [:, literal[int] ]
identifier[A] [ literal[int] ]= identifier[old_div] ( identifier[A] [ literal[int] ],( literal[int] * identifier[numpy] . identifier[sqrt] ( identifier[Zmean] )))
identifier[A] = identifier[numpy] . identifier[concatenate] ([ identifier[A] ,[(- literal[int] * identifier[Zmean] * identifier[A] [ literal[int] ])]], identifier[axis] = literal[int] )
identifier[a] , identifier[b] =(- literal[int] * identifier[A] [ literal[int] : literal[int] ])/ identifier[A] [ literal[int] ]/ literal[int] + identifier[centroid]
identifier[r] = identifier[numpy] . identifier[sqrt] ( identifier[A] [ literal[int] ]* identifier[A] [ literal[int] ]+ identifier[A] [ literal[int] ]* identifier[A] [ literal[int] ]- literal[int] * identifier[A] [ literal[int] ]* identifier[A] [ literal[int] ])/ identifier[abs] ( identifier[A] [ literal[int] ])/ literal[int] ;
keyword[return] identifier[a] , identifier[b] , identifier[r] | def TaubinSVD(XY):
"""
algebraic circle fit
input: list [[x_1, y_1], [x_2, y_2], ....]
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
Algebraic circle fit by Taubin
G. Taubin, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
"""
XY = numpy.array(XY)
X = XY[:, 0] - numpy.mean(XY[:, 0]) # norming points by x avg
Y = XY[:, 1] - numpy.mean(XY[:, 1]) # norming points by y avg
centroid = [numpy.mean(XY[:, 0]), numpy.mean(XY[:, 1])]
Z = X * X + Y * Y
Zmean = numpy.mean(Z)
Z0 = old_div(Z - Zmean, 2.0 * numpy.sqrt(Zmean))
ZXY = numpy.array([Z0, X, Y]).T
(U, S, V) = numpy.linalg.svd(ZXY, full_matrices=False) #
V = V.transpose()
A = V[:, 2]
A[0] = old_div(A[0], 2.0 * numpy.sqrt(Zmean))
A = numpy.concatenate([A, [-1.0 * Zmean * A[0]]], axis=0)
(a, b) = -1 * A[1:3] / A[0] / 2 + centroid
r = numpy.sqrt(A[1] * A[1] + A[2] * A[2] - 4 * A[0] * A[3]) / abs(A[0]) / 2
return (a, b, r) |
def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps)) | def function[ms_to_frames, parameter[ms, fps]]:
constant[
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
]
if compare[name[fps] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da20c6a9540>
return[call[name[int], parameter[call[name[round], parameter[binary_operation[binary_operation[name[ms] / constant[1000]] * name[fps]]]]]]] | keyword[def] identifier[ms_to_frames] ( identifier[ms] , identifier[fps] ):
literal[string]
keyword[if] identifier[fps] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[fps] )
keyword[return] identifier[int] ( identifier[round] (( identifier[ms] / literal[int] )* identifier[fps] )) | def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError('Framerate must be positive number (%f).' % fps) # depends on [control=['if'], data=['fps']]
return int(round(ms / 1000 * fps)) |
def entry(self):
"""
Connects to Youtube Api and retrieves the video entry object
Return:
gdata.youtube.YouTubeVideoEntry
"""
api = Api()
api.authenticate()
return api.fetch_video(self.video_id) | def function[entry, parameter[self]]:
constant[
Connects to Youtube Api and retrieves the video entry object
Return:
gdata.youtube.YouTubeVideoEntry
]
variable[api] assign[=] call[name[Api], parameter[]]
call[name[api].authenticate, parameter[]]
return[call[name[api].fetch_video, parameter[name[self].video_id]]] | keyword[def] identifier[entry] ( identifier[self] ):
literal[string]
identifier[api] = identifier[Api] ()
identifier[api] . identifier[authenticate] ()
keyword[return] identifier[api] . identifier[fetch_video] ( identifier[self] . identifier[video_id] ) | def entry(self):
"""
Connects to Youtube Api and retrieves the video entry object
Return:
gdata.youtube.YouTubeVideoEntry
"""
api = Api()
api.authenticate()
return api.fetch_video(self.video_id) |
def mk_dropdown_tree(cls, model, root_node, for_node=None):
'''
Override of ``treebeard`` method to enforce the same root.
'''
options = []
# The difference is that we only generate the subtree for the current root.
logger.debug("Using root node pk of %s" % root_node.pk)
cls.add_subtree(for_node, root_node, options)
return options[1:] | def function[mk_dropdown_tree, parameter[cls, model, root_node, for_node]]:
constant[
Override of ``treebeard`` method to enforce the same root.
]
variable[options] assign[=] list[[]]
call[name[logger].debug, parameter[binary_operation[constant[Using root node pk of %s] <ast.Mod object at 0x7da2590d6920> name[root_node].pk]]]
call[name[cls].add_subtree, parameter[name[for_node], name[root_node], name[options]]]
return[call[name[options]][<ast.Slice object at 0x7da204961f00>]] | keyword[def] identifier[mk_dropdown_tree] ( identifier[cls] , identifier[model] , identifier[root_node] , identifier[for_node] = keyword[None] ):
literal[string]
identifier[options] =[]
identifier[logger] . identifier[debug] ( literal[string] % identifier[root_node] . identifier[pk] )
identifier[cls] . identifier[add_subtree] ( identifier[for_node] , identifier[root_node] , identifier[options] )
keyword[return] identifier[options] [ literal[int] :] | def mk_dropdown_tree(cls, model, root_node, for_node=None):
"""
Override of ``treebeard`` method to enforce the same root.
"""
options = []
# The difference is that we only generate the subtree for the current root.
logger.debug('Using root node pk of %s' % root_node.pk)
cls.add_subtree(for_node, root_node, options)
return options[1:] |
def range(self, *args):
""" Generate an integer Array containing an arithmetic progression.
"""
args = list(args)
args.insert(0, self.obj)
return self._wrap(range(*args)) | def function[range, parameter[self]]:
constant[ Generate an integer Array containing an arithmetic progression.
]
variable[args] assign[=] call[name[list], parameter[name[args]]]
call[name[args].insert, parameter[constant[0], name[self].obj]]
return[call[name[self]._wrap, parameter[call[name[range], parameter[<ast.Starred object at 0x7da2041d8eb0>]]]]] | keyword[def] identifier[range] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[args] = identifier[list] ( identifier[args] )
identifier[args] . identifier[insert] ( literal[int] , identifier[self] . identifier[obj] )
keyword[return] identifier[self] . identifier[_wrap] ( identifier[range] (* identifier[args] )) | def range(self, *args):
""" Generate an integer Array containing an arithmetic progression.
"""
args = list(args)
args.insert(0, self.obj)
return self._wrap(range(*args)) |
def expression_terminal(self, text):
"""expression_terminal = identifier
| terminal
| option_group
| repetition_group
| grouping_group
| special_handling ;
"""
self._attempting(text)
return alternation([
self.identifier,
self.terminal,
self.option_group,
self.repetition_group,
self.grouping_group,
self.special_handling
])(text) | def function[expression_terminal, parameter[self, text]]:
constant[expression_terminal = identifier
| terminal
| option_group
| repetition_group
| grouping_group
| special_handling ;
]
call[name[self]._attempting, parameter[name[text]]]
return[call[call[name[alternation], parameter[list[[<ast.Attribute object at 0x7da1b0240dc0>, <ast.Attribute object at 0x7da1b02425c0>, <ast.Attribute object at 0x7da1b0241720>, <ast.Attribute object at 0x7da1b0243760>, <ast.Attribute object at 0x7da1b0241de0>, <ast.Attribute object at 0x7da1b0240ee0>]]]], parameter[name[text]]]] | keyword[def] identifier[expression_terminal] ( identifier[self] , identifier[text] ):
literal[string]
identifier[self] . identifier[_attempting] ( identifier[text] )
keyword[return] identifier[alternation] ([
identifier[self] . identifier[identifier] ,
identifier[self] . identifier[terminal] ,
identifier[self] . identifier[option_group] ,
identifier[self] . identifier[repetition_group] ,
identifier[self] . identifier[grouping_group] ,
identifier[self] . identifier[special_handling]
])( identifier[text] ) | def expression_terminal(self, text):
"""expression_terminal = identifier
| terminal
| option_group
| repetition_group
| grouping_group
| special_handling ;
"""
self._attempting(text)
return alternation([self.identifier, self.terminal, self.option_group, self.repetition_group, self.grouping_group, self.special_handling])(text) |
def format(self):
"""Format the message parts to a string.
:Returns: A string of all the message parts separated into paragraphs,
with header and footer paragraphs if they were specified in the
constructor.
"""
message = ""
if self.paragraphs:
if self.header:
message += (self.header + '\n\n')
message += "\n\n".join(self.paragraphs)
if self.footer:
message += ('\n\n' + self.footer)
return message | def function[format, parameter[self]]:
constant[Format the message parts to a string.
:Returns: A string of all the message parts separated into paragraphs,
with header and footer paragraphs if they were specified in the
constructor.
]
variable[message] assign[=] constant[]
if name[self].paragraphs begin[:]
if name[self].header begin[:]
<ast.AugAssign object at 0x7da18c4ceb60>
<ast.AugAssign object at 0x7da18c4ce7d0>
if name[self].footer begin[:]
<ast.AugAssign object at 0x7da18c4ce410>
return[name[message]] | keyword[def] identifier[format] ( identifier[self] ):
literal[string]
identifier[message] = literal[string]
keyword[if] identifier[self] . identifier[paragraphs] :
keyword[if] identifier[self] . identifier[header] :
identifier[message] +=( identifier[self] . identifier[header] + literal[string] )
identifier[message] += literal[string] . identifier[join] ( identifier[self] . identifier[paragraphs] )
keyword[if] identifier[self] . identifier[footer] :
identifier[message] +=( literal[string] + identifier[self] . identifier[footer] )
keyword[return] identifier[message] | def format(self):
"""Format the message parts to a string.
:Returns: A string of all the message parts separated into paragraphs,
with header and footer paragraphs if they were specified in the
constructor.
"""
message = ''
if self.paragraphs:
if self.header:
message += self.header + '\n\n' # depends on [control=['if'], data=[]]
message += '\n\n'.join(self.paragraphs)
if self.footer:
message += '\n\n' + self.footer # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return message |
def blank_canvas(width, height):
"""Return a blank canvas to annotate.
:param width: xdim (int)
:param height: ydim (int)
:returns: :class:`jicbioimage.illustrate.Canvas`
"""
canvas = np.zeros((height, width, 3), dtype=np.uint8)
return canvas.view(Canvas) | def function[blank_canvas, parameter[width, height]]:
constant[Return a blank canvas to annotate.
:param width: xdim (int)
:param height: ydim (int)
:returns: :class:`jicbioimage.illustrate.Canvas`
]
variable[canvas] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c6c4be0>, <ast.Name object at 0x7da20c6c6a70>, <ast.Constant object at 0x7da20c6c7d00>]]]]
return[call[name[canvas].view, parameter[name[Canvas]]]] | keyword[def] identifier[blank_canvas] ( identifier[width] , identifier[height] ):
literal[string]
identifier[canvas] = identifier[np] . identifier[zeros] (( identifier[height] , identifier[width] , literal[int] ), identifier[dtype] = identifier[np] . identifier[uint8] )
keyword[return] identifier[canvas] . identifier[view] ( identifier[Canvas] ) | def blank_canvas(width, height):
"""Return a blank canvas to annotate.
:param width: xdim (int)
:param height: ydim (int)
:returns: :class:`jicbioimage.illustrate.Canvas`
"""
canvas = np.zeros((height, width, 3), dtype=np.uint8)
return canvas.view(Canvas) |
def _validate_schema(schema, body):
"""Validate data against a schema"""
# Note
#
# Schema validation is currently the major CPU bottleneck of
# BigchainDB. the `jsonschema` library validates python data structures
# directly and produces nice error messages, but validation takes 4+ ms
# per transaction which is pretty slow. The rapidjson library validates
# much faster at 1.5ms, however it produces _very_ poor error messages.
# For this reason we use both, rapidjson as an optimistic pathway and
# jsonschema as a fallback in case there is a failure, so we can produce
# a helpful error message.
try:
schema[1](rapidjson.dumps(body))
except ValueError as exc:
try:
jsonschema.validate(body, schema[0])
except jsonschema.ValidationError as exc2:
raise SchemaValidationError(str(exc2)) from exc2
logger.warning('code problem: jsonschema did not raise an exception, wheras rapidjson raised %s', exc)
raise SchemaValidationError(str(exc)) from exc | def function[_validate_schema, parameter[schema, body]]:
constant[Validate data against a schema]
<ast.Try object at 0x7da1b1b03910> | keyword[def] identifier[_validate_schema] ( identifier[schema] , identifier[body] ):
literal[string]
keyword[try] :
identifier[schema] [ literal[int] ]( identifier[rapidjson] . identifier[dumps] ( identifier[body] ))
keyword[except] identifier[ValueError] keyword[as] identifier[exc] :
keyword[try] :
identifier[jsonschema] . identifier[validate] ( identifier[body] , identifier[schema] [ literal[int] ])
keyword[except] identifier[jsonschema] . identifier[ValidationError] keyword[as] identifier[exc2] :
keyword[raise] identifier[SchemaValidationError] ( identifier[str] ( identifier[exc2] )) keyword[from] identifier[exc2]
identifier[logger] . identifier[warning] ( literal[string] , identifier[exc] )
keyword[raise] identifier[SchemaValidationError] ( identifier[str] ( identifier[exc] )) keyword[from] identifier[exc] | def _validate_schema(schema, body):
"""Validate data against a schema"""
# Note
#
# Schema validation is currently the major CPU bottleneck of
# BigchainDB. the `jsonschema` library validates python data structures
# directly and produces nice error messages, but validation takes 4+ ms
# per transaction which is pretty slow. The rapidjson library validates
# much faster at 1.5ms, however it produces _very_ poor error messages.
# For this reason we use both, rapidjson as an optimistic pathway and
# jsonschema as a fallback in case there is a failure, so we can produce
# a helpful error message.
try:
schema[1](rapidjson.dumps(body)) # depends on [control=['try'], data=[]]
except ValueError as exc:
try:
jsonschema.validate(body, schema[0]) # depends on [control=['try'], data=[]]
except jsonschema.ValidationError as exc2:
raise SchemaValidationError(str(exc2)) from exc2 # depends on [control=['except'], data=['exc2']]
logger.warning('code problem: jsonschema did not raise an exception, wheras rapidjson raised %s', exc)
raise SchemaValidationError(str(exc)) from exc # depends on [control=['except'], data=['exc']] |
def map(cls, obj, mode='data', backend=None):
"""
Applies compositor operations to any HoloViews element or container
using the map method.
"""
from .overlay import CompositeOverlay
element_compositors = [c for c in cls.definitions if len(c._pattern_spec) == 1]
overlay_compositors = [c for c in cls.definitions if len(c._pattern_spec) > 1]
if overlay_compositors:
obj = obj.map(lambda obj: cls.collapse_element(obj, mode=mode, backend=backend),
[CompositeOverlay])
element_patterns = [c.pattern for c in element_compositors]
if element_compositors and obj.traverse(lambda x: x, element_patterns):
obj = obj.map(lambda obj: cls.collapse_element(obj, mode=mode, backend=backend),
element_patterns)
return obj | def function[map, parameter[cls, obj, mode, backend]]:
constant[
Applies compositor operations to any HoloViews element or container
using the map method.
]
from relative_module[overlay] import module[CompositeOverlay]
variable[element_compositors] assign[=] <ast.ListComp object at 0x7da2054a7850>
variable[overlay_compositors] assign[=] <ast.ListComp object at 0x7da2054a4a90>
if name[overlay_compositors] begin[:]
variable[obj] assign[=] call[name[obj].map, parameter[<ast.Lambda object at 0x7da2054a58a0>, list[[<ast.Name object at 0x7da2054a49d0>]]]]
variable[element_patterns] assign[=] <ast.ListComp object at 0x7da2054a4640>
if <ast.BoolOp object at 0x7da2054a4e80> begin[:]
variable[obj] assign[=] call[name[obj].map, parameter[<ast.Lambda object at 0x7da2054a6440>, name[element_patterns]]]
return[name[obj]] | keyword[def] identifier[map] ( identifier[cls] , identifier[obj] , identifier[mode] = literal[string] , identifier[backend] = keyword[None] ):
literal[string]
keyword[from] . identifier[overlay] keyword[import] identifier[CompositeOverlay]
identifier[element_compositors] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[cls] . identifier[definitions] keyword[if] identifier[len] ( identifier[c] . identifier[_pattern_spec] )== literal[int] ]
identifier[overlay_compositors] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[cls] . identifier[definitions] keyword[if] identifier[len] ( identifier[c] . identifier[_pattern_spec] )> literal[int] ]
keyword[if] identifier[overlay_compositors] :
identifier[obj] = identifier[obj] . identifier[map] ( keyword[lambda] identifier[obj] : identifier[cls] . identifier[collapse_element] ( identifier[obj] , identifier[mode] = identifier[mode] , identifier[backend] = identifier[backend] ),
[ identifier[CompositeOverlay] ])
identifier[element_patterns] =[ identifier[c] . identifier[pattern] keyword[for] identifier[c] keyword[in] identifier[element_compositors] ]
keyword[if] identifier[element_compositors] keyword[and] identifier[obj] . identifier[traverse] ( keyword[lambda] identifier[x] : identifier[x] , identifier[element_patterns] ):
identifier[obj] = identifier[obj] . identifier[map] ( keyword[lambda] identifier[obj] : identifier[cls] . identifier[collapse_element] ( identifier[obj] , identifier[mode] = identifier[mode] , identifier[backend] = identifier[backend] ),
identifier[element_patterns] )
keyword[return] identifier[obj] | def map(cls, obj, mode='data', backend=None):
"""
Applies compositor operations to any HoloViews element or container
using the map method.
"""
from .overlay import CompositeOverlay
element_compositors = [c for c in cls.definitions if len(c._pattern_spec) == 1]
overlay_compositors = [c for c in cls.definitions if len(c._pattern_spec) > 1]
if overlay_compositors:
obj = obj.map(lambda obj: cls.collapse_element(obj, mode=mode, backend=backend), [CompositeOverlay]) # depends on [control=['if'], data=[]]
element_patterns = [c.pattern for c in element_compositors]
if element_compositors and obj.traverse(lambda x: x, element_patterns):
obj = obj.map(lambda obj: cls.collapse_element(obj, mode=mode, backend=backend), element_patterns) # depends on [control=['if'], data=[]]
return obj |
def any_ipaddress_field(field, **kwargs):
"""
Return random value for IPAddressField
>>> result = any_field(models.IPAddressField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import ipv4_re
>>> re.match(ipv4_re, result) is not None
True
"""
nums = [str(xunit.any_int(min_value=0, max_value=255)) for _ in xrange(0, 4)]
return ".".join(nums) | def function[any_ipaddress_field, parameter[field]]:
constant[
Return random value for IPAddressField
>>> result = any_field(models.IPAddressField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import ipv4_re
>>> re.match(ipv4_re, result) is not None
True
]
variable[nums] assign[=] <ast.ListComp object at 0x7da1b02122c0>
return[call[constant[.].join, parameter[name[nums]]]] | keyword[def] identifier[any_ipaddress_field] ( identifier[field] ,** identifier[kwargs] ):
literal[string]
identifier[nums] =[ identifier[str] ( identifier[xunit] . identifier[any_int] ( identifier[min_value] = literal[int] , identifier[max_value] = literal[int] )) keyword[for] identifier[_] keyword[in] identifier[xrange] ( literal[int] , literal[int] )]
keyword[return] literal[string] . identifier[join] ( identifier[nums] ) | def any_ipaddress_field(field, **kwargs):
"""
Return random value for IPAddressField
>>> result = any_field(models.IPAddressField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import ipv4_re
>>> re.match(ipv4_re, result) is not None
True
"""
nums = [str(xunit.any_int(min_value=0, max_value=255)) for _ in xrange(0, 4)]
return '.'.join(nums) |
async def requirements(client: Client, search: str) -> dict:
"""
GET list of requirements for a given UID/Public key
:param client: Client to connect to the api
:param search: UID or public key
:return:
"""
return await client.get(MODULE + '/requirements/%s' % search, schema=REQUIREMENTS_SCHEMA) | <ast.AsyncFunctionDef object at 0x7da204345960> | keyword[async] keyword[def] identifier[requirements] ( identifier[client] : identifier[Client] , identifier[search] : identifier[str] )-> identifier[dict] :
literal[string]
keyword[return] keyword[await] identifier[client] . identifier[get] ( identifier[MODULE] + literal[string] % identifier[search] , identifier[schema] = identifier[REQUIREMENTS_SCHEMA] ) | async def requirements(client: Client, search: str) -> dict:
"""
GET list of requirements for a given UID/Public key
:param client: Client to connect to the api
:param search: UID or public key
:return:
"""
return await client.get(MODULE + '/requirements/%s' % search, schema=REQUIREMENTS_SCHEMA) |
def _handle_list(self):
"""Handle a wiki-style list (``#``, ``*``, ``;``, ``:``)."""
self._handle_list_marker()
while self._read(1) in ("#", "*", ";", ":"):
self._head += 1
self._handle_list_marker() | def function[_handle_list, parameter[self]]:
constant[Handle a wiki-style list (``#``, ``*``, ``;``, ``:``).]
call[name[self]._handle_list_marker, parameter[]]
while compare[call[name[self]._read, parameter[constant[1]]] in tuple[[<ast.Constant object at 0x7da204345d20>, <ast.Constant object at 0x7da204345090>, <ast.Constant object at 0x7da204347430>, <ast.Constant object at 0x7da204346890>]]] begin[:]
<ast.AugAssign object at 0x7da204345e70>
call[name[self]._handle_list_marker, parameter[]] | keyword[def] identifier[_handle_list] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_handle_list_marker] ()
keyword[while] identifier[self] . identifier[_read] ( literal[int] ) keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[self] . identifier[_head] += literal[int]
identifier[self] . identifier[_handle_list_marker] () | def _handle_list(self):
"""Handle a wiki-style list (``#``, ``*``, ``;``, ``:``)."""
self._handle_list_marker()
while self._read(1) in ('#', '*', ';', ':'):
self._head += 1
self._handle_list_marker() # depends on [control=['while'], data=[]] |
def email(anon, obj, field, val):
"""
Generates a random email address.
"""
return anon.faker.email(field=field) | def function[email, parameter[anon, obj, field, val]]:
constant[
Generates a random email address.
]
return[call[name[anon].faker.email, parameter[]]] | keyword[def] identifier[email] ( identifier[anon] , identifier[obj] , identifier[field] , identifier[val] ):
literal[string]
keyword[return] identifier[anon] . identifier[faker] . identifier[email] ( identifier[field] = identifier[field] ) | def email(anon, obj, field, val):
"""
Generates a random email address.
"""
return anon.faker.email(field=field) |
def git_branch(self):
"""Git branch name."""
info = self.info
return 'rpm-{major}.{minor}.x'.format(
major=info[0], minor=info[1]) | def function[git_branch, parameter[self]]:
constant[Git branch name.]
variable[info] assign[=] name[self].info
return[call[constant[rpm-{major}.{minor}.x].format, parameter[]]] | keyword[def] identifier[git_branch] ( identifier[self] ):
literal[string]
identifier[info] = identifier[self] . identifier[info]
keyword[return] literal[string] . identifier[format] (
identifier[major] = identifier[info] [ literal[int] ], identifier[minor] = identifier[info] [ literal[int] ]) | def git_branch(self):
"""Git branch name."""
info = self.info
return 'rpm-{major}.{minor}.x'.format(major=info[0], minor=info[1]) |
def get_partitioned_query_result(self, partition_key, selector, fields=None,
raw_result=False, **kwargs):
"""
Retrieves the partitioned query result from the specified database based
on the query parameters provided.
See :func:`~cloudant.database.CouchDatabase.get_query_result` method for
further details.
:param str partition_key: Partition key.
:param str selector: Dictionary object describing criteria used to
select documents.
:param list fields: A list of fields to be returned by the query.
:param bool raw_result: Dictates whether the query result is returned
wrapped in a QueryResult or if the response JSON is returned.
Defaults to False.
:param kwargs: See
:func:`~cloudant.database.CouchDatabase.get_query_result` method for
available keyword arguments.
:returns: The result content either wrapped in a QueryResult or
as the raw response JSON content.
:rtype: QueryResult, dict
"""
query = Query(self,
selector=selector,
fields=fields,
partition_key=partition_key)
return self._get_query_result(query, raw_result, **kwargs) | def function[get_partitioned_query_result, parameter[self, partition_key, selector, fields, raw_result]]:
constant[
Retrieves the partitioned query result from the specified database based
on the query parameters provided.
See :func:`~cloudant.database.CouchDatabase.get_query_result` method for
further details.
:param str partition_key: Partition key.
:param str selector: Dictionary object describing criteria used to
select documents.
:param list fields: A list of fields to be returned by the query.
:param bool raw_result: Dictates whether the query result is returned
wrapped in a QueryResult or if the response JSON is returned.
Defaults to False.
:param kwargs: See
:func:`~cloudant.database.CouchDatabase.get_query_result` method for
available keyword arguments.
:returns: The result content either wrapped in a QueryResult or
as the raw response JSON content.
:rtype: QueryResult, dict
]
variable[query] assign[=] call[name[Query], parameter[name[self]]]
return[call[name[self]._get_query_result, parameter[name[query], name[raw_result]]]] | keyword[def] identifier[get_partitioned_query_result] ( identifier[self] , identifier[partition_key] , identifier[selector] , identifier[fields] = keyword[None] ,
identifier[raw_result] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[query] = identifier[Query] ( identifier[self] ,
identifier[selector] = identifier[selector] ,
identifier[fields] = identifier[fields] ,
identifier[partition_key] = identifier[partition_key] )
keyword[return] identifier[self] . identifier[_get_query_result] ( identifier[query] , identifier[raw_result] ,** identifier[kwargs] ) | def get_partitioned_query_result(self, partition_key, selector, fields=None, raw_result=False, **kwargs):
"""
Retrieves the partitioned query result from the specified database based
on the query parameters provided.
See :func:`~cloudant.database.CouchDatabase.get_query_result` method for
further details.
:param str partition_key: Partition key.
:param str selector: Dictionary object describing criteria used to
select documents.
:param list fields: A list of fields to be returned by the query.
:param bool raw_result: Dictates whether the query result is returned
wrapped in a QueryResult or if the response JSON is returned.
Defaults to False.
:param kwargs: See
:func:`~cloudant.database.CouchDatabase.get_query_result` method for
available keyword arguments.
:returns: The result content either wrapped in a QueryResult or
as the raw response JSON content.
:rtype: QueryResult, dict
"""
query = Query(self, selector=selector, fields=fields, partition_key=partition_key)
return self._get_query_result(query, raw_result, **kwargs) |
def random_int(maximum_value):
""" Random generator (PyCrypto getrandbits wrapper). The result is a non-negative value.
:param maximum_value: maximum integer value
:return: int
"""
if maximum_value == 0:
return 0
elif maximum_value == 1:
return random_bits(1)
bits = math.floor(math.log2(maximum_value))
result = random_bits(bits) + random_int(maximum_value - ((2 ** bits) - 1))
return result | def function[random_int, parameter[maximum_value]]:
constant[ Random generator (PyCrypto getrandbits wrapper). The result is a non-negative value.
:param maximum_value: maximum integer value
:return: int
]
if compare[name[maximum_value] equal[==] constant[0]] begin[:]
return[constant[0]]
variable[bits] assign[=] call[name[math].floor, parameter[call[name[math].log2, parameter[name[maximum_value]]]]]
variable[result] assign[=] binary_operation[call[name[random_bits], parameter[name[bits]]] + call[name[random_int], parameter[binary_operation[name[maximum_value] - binary_operation[binary_operation[constant[2] ** name[bits]] - constant[1]]]]]]
return[name[result]] | keyword[def] identifier[random_int] ( identifier[maximum_value] ):
literal[string]
keyword[if] identifier[maximum_value] == literal[int] :
keyword[return] literal[int]
keyword[elif] identifier[maximum_value] == literal[int] :
keyword[return] identifier[random_bits] ( literal[int] )
identifier[bits] = identifier[math] . identifier[floor] ( identifier[math] . identifier[log2] ( identifier[maximum_value] ))
identifier[result] = identifier[random_bits] ( identifier[bits] )+ identifier[random_int] ( identifier[maximum_value] -(( literal[int] ** identifier[bits] )- literal[int] ))
keyword[return] identifier[result] | def random_int(maximum_value):
""" Random generator (PyCrypto getrandbits wrapper). The result is a non-negative value.
:param maximum_value: maximum integer value
:return: int
"""
if maximum_value == 0:
return 0 # depends on [control=['if'], data=[]]
elif maximum_value == 1:
return random_bits(1) # depends on [control=['if'], data=[]]
bits = math.floor(math.log2(maximum_value))
result = random_bits(bits) + random_int(maximum_value - (2 ** bits - 1))
return result |
def ended(self):
"""Datetime at which the job finished.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the end time (None until set from the server).
"""
statistics = self._properties.get("statistics")
if statistics is not None:
millis = statistics.get("endTime")
if millis is not None:
return _helpers._datetime_from_microseconds(millis * 1000.0) | def function[ended, parameter[self]]:
constant[Datetime at which the job finished.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the end time (None until set from the server).
]
variable[statistics] assign[=] call[name[self]._properties.get, parameter[constant[statistics]]]
if compare[name[statistics] is_not constant[None]] begin[:]
variable[millis] assign[=] call[name[statistics].get, parameter[constant[endTime]]]
if compare[name[millis] is_not constant[None]] begin[:]
return[call[name[_helpers]._datetime_from_microseconds, parameter[binary_operation[name[millis] * constant[1000.0]]]]] | keyword[def] identifier[ended] ( identifier[self] ):
literal[string]
identifier[statistics] = identifier[self] . identifier[_properties] . identifier[get] ( literal[string] )
keyword[if] identifier[statistics] keyword[is] keyword[not] keyword[None] :
identifier[millis] = identifier[statistics] . identifier[get] ( literal[string] )
keyword[if] identifier[millis] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[_helpers] . identifier[_datetime_from_microseconds] ( identifier[millis] * literal[int] ) | def ended(self):
"""Datetime at which the job finished.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the end time (None until set from the server).
"""
statistics = self._properties.get('statistics')
if statistics is not None:
millis = statistics.get('endTime')
if millis is not None:
return _helpers._datetime_from_microseconds(millis * 1000.0) # depends on [control=['if'], data=['millis']] # depends on [control=['if'], data=['statistics']] |
def search(table, *args, **kwargs):
"""
Perform a regular expression search, returning rows that match a given
pattern, either anywhere in the row or within a specific field. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['orange', 12, 'oranges are nice fruit'],
... ['mango', 42, 'I like them'],
... ['banana', 74, 'lovely too'],
... ['cucumber', 41, 'better than mango']]
>>> # search any field
... table2 = etl.search(table1, '.g.')
>>> table2
+------------+-----+--------------------------+
| foo | bar | baz |
+============+=====+==========================+
| 'orange' | 12 | 'oranges are nice fruit' |
+------------+-----+--------------------------+
| 'mango' | 42 | 'I like them' |
+------------+-----+--------------------------+
| 'cucumber' | 41 | 'better than mango' |
+------------+-----+--------------------------+
>>> # search a specific field
... table3 = etl.search(table1, 'foo', '.g.')
>>> table3
+----------+-----+--------------------------+
| foo | bar | baz |
+==========+=====+==========================+
| 'orange' | 12 | 'oranges are nice fruit' |
+----------+-----+--------------------------+
| 'mango' | 42 | 'I like them' |
+----------+-----+--------------------------+
The complement can be found via
:func:`petl.transform.regex.searchcomplement`.
"""
if len(args) == 1:
field = None
pattern = args[0]
elif len(args) == 2:
field = args[0]
pattern = args[1]
else:
raise ArgumentError('expected 1 or 2 positional arguments')
return SearchView(table, pattern, field=field, **kwargs) | def function[search, parameter[table]]:
constant[
Perform a regular expression search, returning rows that match a given
pattern, either anywhere in the row or within a specific field. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['orange', 12, 'oranges are nice fruit'],
... ['mango', 42, 'I like them'],
... ['banana', 74, 'lovely too'],
... ['cucumber', 41, 'better than mango']]
>>> # search any field
... table2 = etl.search(table1, '.g.')
>>> table2
+------------+-----+--------------------------+
| foo | bar | baz |
+============+=====+==========================+
| 'orange' | 12 | 'oranges are nice fruit' |
+------------+-----+--------------------------+
| 'mango' | 42 | 'I like them' |
+------------+-----+--------------------------+
| 'cucumber' | 41 | 'better than mango' |
+------------+-----+--------------------------+
>>> # search a specific field
... table3 = etl.search(table1, 'foo', '.g.')
>>> table3
+----------+-----+--------------------------+
| foo | bar | baz |
+==========+=====+==========================+
| 'orange' | 12 | 'oranges are nice fruit' |
+----------+-----+--------------------------+
| 'mango' | 42 | 'I like them' |
+----------+-----+--------------------------+
The complement can be found via
:func:`petl.transform.regex.searchcomplement`.
]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:]
variable[field] assign[=] constant[None]
variable[pattern] assign[=] call[name[args]][constant[0]]
return[call[name[SearchView], parameter[name[table], name[pattern]]]] | keyword[def] identifier[search] ( identifier[table] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[field] = keyword[None]
identifier[pattern] = identifier[args] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
identifier[field] = identifier[args] [ literal[int] ]
identifier[pattern] = identifier[args] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[ArgumentError] ( literal[string] )
keyword[return] identifier[SearchView] ( identifier[table] , identifier[pattern] , identifier[field] = identifier[field] ,** identifier[kwargs] ) | def search(table, *args, **kwargs):
"""
Perform a regular expression search, returning rows that match a given
pattern, either anywhere in the row or within a specific field. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['orange', 12, 'oranges are nice fruit'],
... ['mango', 42, 'I like them'],
... ['banana', 74, 'lovely too'],
... ['cucumber', 41, 'better than mango']]
>>> # search any field
... table2 = etl.search(table1, '.g.')
>>> table2
+------------+-----+--------------------------+
| foo | bar | baz |
+============+=====+==========================+
| 'orange' | 12 | 'oranges are nice fruit' |
+------------+-----+--------------------------+
| 'mango' | 42 | 'I like them' |
+------------+-----+--------------------------+
| 'cucumber' | 41 | 'better than mango' |
+------------+-----+--------------------------+
>>> # search a specific field
... table3 = etl.search(table1, 'foo', '.g.')
>>> table3
+----------+-----+--------------------------+
| foo | bar | baz |
+==========+=====+==========================+
| 'orange' | 12 | 'oranges are nice fruit' |
+----------+-----+--------------------------+
| 'mango' | 42 | 'I like them' |
+----------+-----+--------------------------+
The complement can be found via
:func:`petl.transform.regex.searchcomplement`.
"""
if len(args) == 1:
field = None
pattern = args[0] # depends on [control=['if'], data=[]]
elif len(args) == 2:
field = args[0]
pattern = args[1] # depends on [control=['if'], data=[]]
else:
raise ArgumentError('expected 1 or 2 positional arguments')
return SearchView(table, pattern, field=field, **kwargs) |
def set_location(self, place, latitude, longitude, pipe=None):
"""
Set the location of *place* to the location specified by
*latitude* and *longitude*.
*place* can be any pickle-able Python object.
"""
pipe = self.redis if pipe is None else pipe
pipe.geoadd(self.key, longitude, latitude, self._pickle(place)) | def function[set_location, parameter[self, place, latitude, longitude, pipe]]:
constant[
Set the location of *place* to the location specified by
*latitude* and *longitude*.
*place* can be any pickle-able Python object.
]
variable[pipe] assign[=] <ast.IfExp object at 0x7da18eb55ba0>
call[name[pipe].geoadd, parameter[name[self].key, name[longitude], name[latitude], call[name[self]._pickle, parameter[name[place]]]]] | keyword[def] identifier[set_location] ( identifier[self] , identifier[place] , identifier[latitude] , identifier[longitude] , identifier[pipe] = keyword[None] ):
literal[string]
identifier[pipe] = identifier[self] . identifier[redis] keyword[if] identifier[pipe] keyword[is] keyword[None] keyword[else] identifier[pipe]
identifier[pipe] . identifier[geoadd] ( identifier[self] . identifier[key] , identifier[longitude] , identifier[latitude] , identifier[self] . identifier[_pickle] ( identifier[place] )) | def set_location(self, place, latitude, longitude, pipe=None):
"""
Set the location of *place* to the location specified by
*latitude* and *longitude*.
*place* can be any pickle-able Python object.
"""
pipe = self.redis if pipe is None else pipe
pipe.geoadd(self.key, longitude, latitude, self._pickle(place)) |
def _get(self, pos):
"""loads widget at given position; handling invalid arguments"""
res = None, None
if pos is not None:
try:
res = self[pos], pos
except (IndexError, KeyError):
pass
return res | def function[_get, parameter[self, pos]]:
constant[loads widget at given position; handling invalid arguments]
variable[res] assign[=] tuple[[<ast.Constant object at 0x7da18dc044c0>, <ast.Constant object at 0x7da18dc05420>]]
if compare[name[pos] is_not constant[None]] begin[:]
<ast.Try object at 0x7da18dc04550>
return[name[res]] | keyword[def] identifier[_get] ( identifier[self] , identifier[pos] ):
literal[string]
identifier[res] = keyword[None] , keyword[None]
keyword[if] identifier[pos] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[res] = identifier[self] [ identifier[pos] ], identifier[pos]
keyword[except] ( identifier[IndexError] , identifier[KeyError] ):
keyword[pass]
keyword[return] identifier[res] | def _get(self, pos):
"""loads widget at given position; handling invalid arguments"""
res = (None, None)
if pos is not None:
try:
res = (self[pos], pos) # depends on [control=['try'], data=[]]
except (IndexError, KeyError):
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['pos']]
return res |
def _QueryChangeFeed(self, collection_link, resource_type, options=None, partition_key_range_id=None):
"""Queries change feed of a resource in a collection.
:param str collection_link:
The link to the document collection.
:param str resource_type:
The type of the resource.
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
options['changeFeed'] = True
resource_key_map = {'Documents' : 'docs'}
# For now, change feed only supports Documents and Partition Key Range resouce type
if resource_type not in resource_key_map:
raise NotImplementedError(resource_type + " change feed query is not supported.")
resource_key = resource_key_map[resource_type]
path = base.GetPathFromLink(collection_link, resource_key)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
def fetch_fn(options):
return self.__QueryFeed(path,
resource_key,
collection_id,
lambda r: r[resource_type],
lambda _, b: b,
None,
options,
partition_key_range_id), self.last_response_headers
return query_iterable.QueryIterable(self, None, options, fetch_fn, collection_link) | def function[_QueryChangeFeed, parameter[self, collection_link, resource_type, options, partition_key_range_id]]:
constant[Queries change feed of a resource in a collection.
:param str collection_link:
The link to the document collection.
:param str resource_type:
The type of the resource.
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
]
if compare[name[options] is constant[None]] begin[:]
variable[options] assign[=] dictionary[[], []]
call[name[options]][constant[changeFeed]] assign[=] constant[True]
variable[resource_key_map] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c4ca0>], [<ast.Constant object at 0x7da20c6c7400>]]
if compare[name[resource_type] <ast.NotIn object at 0x7da2590d7190> name[resource_key_map]] begin[:]
<ast.Raise object at 0x7da20c6c7c70>
variable[resource_key] assign[=] call[name[resource_key_map]][name[resource_type]]
variable[path] assign[=] call[name[base].GetPathFromLink, parameter[name[collection_link], name[resource_key]]]
variable[collection_id] assign[=] call[name[base].GetResourceIdOrFullNameFromLink, parameter[name[collection_link]]]
def function[fetch_fn, parameter[options]]:
return[tuple[[<ast.Call object at 0x7da1b18e6680>, <ast.Attribute object at 0x7da1b18e40d0>]]]
return[call[name[query_iterable].QueryIterable, parameter[name[self], constant[None], name[options], name[fetch_fn], name[collection_link]]]] | keyword[def] identifier[_QueryChangeFeed] ( identifier[self] , identifier[collection_link] , identifier[resource_type] , identifier[options] = keyword[None] , identifier[partition_key_range_id] = keyword[None] ):
literal[string]
keyword[if] identifier[options] keyword[is] keyword[None] :
identifier[options] ={}
identifier[options] [ literal[string] ]= keyword[True]
identifier[resource_key_map] ={ literal[string] : literal[string] }
keyword[if] identifier[resource_type] keyword[not] keyword[in] identifier[resource_key_map] :
keyword[raise] identifier[NotImplementedError] ( identifier[resource_type] + literal[string] )
identifier[resource_key] = identifier[resource_key_map] [ identifier[resource_type] ]
identifier[path] = identifier[base] . identifier[GetPathFromLink] ( identifier[collection_link] , identifier[resource_key] )
identifier[collection_id] = identifier[base] . identifier[GetResourceIdOrFullNameFromLink] ( identifier[collection_link] )
keyword[def] identifier[fetch_fn] ( identifier[options] ):
keyword[return] identifier[self] . identifier[__QueryFeed] ( identifier[path] ,
identifier[resource_key] ,
identifier[collection_id] ,
keyword[lambda] identifier[r] : identifier[r] [ identifier[resource_type] ],
keyword[lambda] identifier[_] , identifier[b] : identifier[b] ,
keyword[None] ,
identifier[options] ,
identifier[partition_key_range_id] ), identifier[self] . identifier[last_response_headers]
keyword[return] identifier[query_iterable] . identifier[QueryIterable] ( identifier[self] , keyword[None] , identifier[options] , identifier[fetch_fn] , identifier[collection_link] ) | def _QueryChangeFeed(self, collection_link, resource_type, options=None, partition_key_range_id=None):
"""Queries change feed of a resource in a collection.
:param str collection_link:
The link to the document collection.
:param str resource_type:
The type of the resource.
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {} # depends on [control=['if'], data=['options']]
options['changeFeed'] = True
resource_key_map = {'Documents': 'docs'}
# For now, change feed only supports Documents and Partition Key Range resouce type
if resource_type not in resource_key_map:
raise NotImplementedError(resource_type + ' change feed query is not supported.') # depends on [control=['if'], data=['resource_type']]
resource_key = resource_key_map[resource_type]
path = base.GetPathFromLink(collection_link, resource_key)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
def fetch_fn(options):
return (self.__QueryFeed(path, resource_key, collection_id, lambda r: r[resource_type], lambda _, b: b, None, options, partition_key_range_id), self.last_response_headers)
return query_iterable.QueryIterable(self, None, options, fetch_fn, collection_link) |
def view_all_tags(ctx, param, value):
""" List available tags and associated modules
Called by eager click option: --view-tags
"""
# To make sure this function executed only when the flag was called
if not value or ctx.resilient_parsing:
return
avail_tags = dict()
print("\nMultiQC Available module tag groups:\n")
for mod_dict in filter(lambda mod:isinstance(mod, dict), config.module_order):
mod_key, mod_val = list(mod_dict.items())[0]
tags = list(mod_val.get('module_tag', []))
for t in tags:
if t not in avail_tags:
avail_tags[t] = []
avail_tags[t].append(mod_key)
for t in sorted(avail_tags.keys(), key=lambda s: s.lower()):
print (" - {}:".format(t))
for ttgs in avail_tags[t]:
print (" - {}".format(ttgs))
ctx.exit() | def function[view_all_tags, parameter[ctx, param, value]]:
constant[ List available tags and associated modules
Called by eager click option: --view-tags
]
if <ast.BoolOp object at 0x7da18f00c820> begin[:]
return[None]
variable[avail_tags] assign[=] call[name[dict], parameter[]]
call[name[print], parameter[constant[
MultiQC Available module tag groups:
]]]
for taget[name[mod_dict]] in starred[call[name[filter], parameter[<ast.Lambda object at 0x7da18f00f3a0>, name[config].module_order]]] begin[:]
<ast.Tuple object at 0x7da18f00c400> assign[=] call[call[name[list], parameter[call[name[mod_dict].items, parameter[]]]]][constant[0]]
variable[tags] assign[=] call[name[list], parameter[call[name[mod_val].get, parameter[constant[module_tag], list[[]]]]]]
for taget[name[t]] in starred[name[tags]] begin[:]
if compare[name[t] <ast.NotIn object at 0x7da2590d7190> name[avail_tags]] begin[:]
call[name[avail_tags]][name[t]] assign[=] list[[]]
call[call[name[avail_tags]][name[t]].append, parameter[name[mod_key]]]
for taget[name[t]] in starred[call[name[sorted], parameter[call[name[avail_tags].keys, parameter[]]]]] begin[:]
call[name[print], parameter[call[constant[ - {}:].format, parameter[name[t]]]]]
for taget[name[ttgs]] in starred[call[name[avail_tags]][name[t]]] begin[:]
call[name[print], parameter[call[constant[ - {}].format, parameter[name[ttgs]]]]]
call[name[ctx].exit, parameter[]] | keyword[def] identifier[view_all_tags] ( identifier[ctx] , identifier[param] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] keyword[or] identifier[ctx] . identifier[resilient_parsing] :
keyword[return]
identifier[avail_tags] = identifier[dict] ()
identifier[print] ( literal[string] )
keyword[for] identifier[mod_dict] keyword[in] identifier[filter] ( keyword[lambda] identifier[mod] : identifier[isinstance] ( identifier[mod] , identifier[dict] ), identifier[config] . identifier[module_order] ):
identifier[mod_key] , identifier[mod_val] = identifier[list] ( identifier[mod_dict] . identifier[items] ())[ literal[int] ]
identifier[tags] = identifier[list] ( identifier[mod_val] . identifier[get] ( literal[string] ,[]))
keyword[for] identifier[t] keyword[in] identifier[tags] :
keyword[if] identifier[t] keyword[not] keyword[in] identifier[avail_tags] :
identifier[avail_tags] [ identifier[t] ]=[]
identifier[avail_tags] [ identifier[t] ]. identifier[append] ( identifier[mod_key] )
keyword[for] identifier[t] keyword[in] identifier[sorted] ( identifier[avail_tags] . identifier[keys] (), identifier[key] = keyword[lambda] identifier[s] : identifier[s] . identifier[lower] ()):
identifier[print] ( literal[string] . identifier[format] ( identifier[t] ))
keyword[for] identifier[ttgs] keyword[in] identifier[avail_tags] [ identifier[t] ]:
identifier[print] ( literal[string] . identifier[format] ( identifier[ttgs] ))
identifier[ctx] . identifier[exit] () | def view_all_tags(ctx, param, value):
""" List available tags and associated modules
Called by eager click option: --view-tags
"""
# To make sure this function executed only when the flag was called
if not value or ctx.resilient_parsing:
return # depends on [control=['if'], data=[]]
avail_tags = dict()
print('\nMultiQC Available module tag groups:\n')
for mod_dict in filter(lambda mod: isinstance(mod, dict), config.module_order):
(mod_key, mod_val) = list(mod_dict.items())[0]
tags = list(mod_val.get('module_tag', []))
for t in tags:
if t not in avail_tags:
avail_tags[t] = [] # depends on [control=['if'], data=['t', 'avail_tags']]
avail_tags[t].append(mod_key) # depends on [control=['for'], data=['t']] # depends on [control=['for'], data=['mod_dict']]
for t in sorted(avail_tags.keys(), key=lambda s: s.lower()):
print(' - {}:'.format(t))
for ttgs in avail_tags[t]:
print(' - {}'.format(ttgs)) # depends on [control=['for'], data=['ttgs']] # depends on [control=['for'], data=['t']]
ctx.exit() |
def do_it(self, dbg):
'''Starts a thread that will load values asynchronously'''
try:
var_objects = []
for variable in self.vars:
variable = variable.strip()
if len(variable) > 0:
if '\t' in variable: # there are attributes beyond scope
scope, attrs = variable.split('\t', 1)
name = attrs[0]
else:
scope, attrs = (variable, None)
name = scope
var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs)
var_objects.append((var_obj, name))
t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
t.start()
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
dbg.writer.add_command(cmd) | def function[do_it, parameter[self, dbg]]:
constant[Starts a thread that will load values asynchronously]
<ast.Try object at 0x7da20c6a8ac0> | keyword[def] identifier[do_it] ( identifier[self] , identifier[dbg] ):
literal[string]
keyword[try] :
identifier[var_objects] =[]
keyword[for] identifier[variable] keyword[in] identifier[self] . identifier[vars] :
identifier[variable] = identifier[variable] . identifier[strip] ()
keyword[if] identifier[len] ( identifier[variable] )> literal[int] :
keyword[if] literal[string] keyword[in] identifier[variable] :
identifier[scope] , identifier[attrs] = identifier[variable] . identifier[split] ( literal[string] , literal[int] )
identifier[name] = identifier[attrs] [ literal[int] ]
keyword[else] :
identifier[scope] , identifier[attrs] =( identifier[variable] , keyword[None] )
identifier[name] = identifier[scope]
identifier[var_obj] = identifier[pydevd_vars] . identifier[getVariable] ( identifier[dbg] , identifier[self] . identifier[thread_id] , identifier[self] . identifier[frame_id] , identifier[scope] , identifier[attrs] )
identifier[var_objects] . identifier[append] (( identifier[var_obj] , identifier[name] ))
identifier[t] = identifier[GetValueAsyncThreadDebug] ( identifier[dbg] , identifier[self] . identifier[sequence] , identifier[var_objects] )
identifier[t] . identifier[start] ()
keyword[except] :
identifier[exc] = identifier[get_exception_traceback_str] ()
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] %( identifier[exc] ,))
identifier[cmd] = identifier[dbg] . identifier[cmd_factory] . identifier[make_error_message] ( identifier[self] . identifier[sequence] , literal[string] % identifier[exc] )
identifier[dbg] . identifier[writer] . identifier[add_command] ( identifier[cmd] ) | def do_it(self, dbg):
"""Starts a thread that will load values asynchronously"""
try:
var_objects = []
for variable in self.vars:
variable = variable.strip()
if len(variable) > 0:
if '\t' in variable: # there are attributes beyond scope
(scope, attrs) = variable.split('\t', 1)
name = attrs[0] # depends on [control=['if'], data=['variable']]
else:
(scope, attrs) = (variable, None)
name = scope
var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs)
var_objects.append((var_obj, name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['variable']]
t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
t.start() # depends on [control=['try'], data=[]]
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, 'Error evaluating variable %s ' % exc)
dbg.writer.add_command(cmd) # depends on [control=['except'], data=[]] |
def read_meta(path_dir, tag_model, tag_id):
'''Read meta data from Little Leonardo data header rows
Args
----
path_dir: str
Parent directory containing lleo data files
tag_model: str
Little Leonardo tag model name
tag_id: str, int
Little Leonardo tag ID number
Returns
-------
meta: dict
dictionary with meta data from header lines of lleo data files
'''
from collections import OrderedDict
import os
import yamlord
from . import utils
def _parse_meta_line(line):
'''Return key, value pair parsed from data header line'''
# Parse the key and its value from the line
key, val = line.replace(':', '').replace('"', '').split(',')
return key.strip(), val.strip()
def _read_meta_all(f, meta, n_header):
'''Read all meta data from header rows of data file'''
# Skip 'File name' line
f.seek(0)
_ = f.readline()
# Create child dictionary for channel / file
line = f.readline()
key_ch, val_ch = _parse_meta_line(line)
val_ch = utils.posix_string(val_ch)
meta['parameters'][val_ch] = OrderedDict()
# Write header values to channel dict
for _ in range(n_header-2):
line = f.readline()
key, val = _parse_meta_line(line)
meta['parameters'][val_ch][key] = val.strip()
return meta
def _create_meta(path_dir, tag_model, tag_id):
'''Create meta data dictionary'''
import datetime
from . import utils
param_strs = utils.get_tag_params(tag_model)
# Create dictionary of meta data
meta = OrderedDict()
# Create fields for the parameters in data directory name
exp_name = os.path.split(path_dir)[1]
params_tag = utils.parse_experiment_params(exp_name)
for key, value in params_tag.items():
meta[key] = value
fmt = "%Y-%m-%d %H:%M:%S"
meta['date_modified'] = datetime.datetime.now().strftime(fmt)
meta['parameters'] = OrderedDict()
for param_str in param_strs:
print('Create meta entry for {}'.format(param_str))
path_file = utils.find_file(path_dir, param_str, '.TXT')
# Get number of header rows
enc = utils.predict_encoding(path_file, n_lines=20)
with open(path_file, 'r', encoding=enc) as f:
n_header = utils.get_n_header(f)
f.seek(0)
meta = _read_meta_all(f, meta, n_header=n_header)
return meta
# Load meta data from YAML file if it already exists
meta_yaml_path = os.path.join(path_dir, 'meta.yml')
# Load file if exists else create
if os.path.isfile(meta_yaml_path):
meta = yamlord.read_yaml(meta_yaml_path)
# Else create meta dictionary and save to YAML
else:
meta = _create_meta(path_dir, tag_model, tag_id)
yamlord.write_yaml(meta, meta_yaml_path)
return meta | def function[read_meta, parameter[path_dir, tag_model, tag_id]]:
constant[Read meta data from Little Leonardo data header rows
Args
----
path_dir: str
Parent directory containing lleo data files
tag_model: str
Little Leonardo tag model name
tag_id: str, int
Little Leonardo tag ID number
Returns
-------
meta: dict
dictionary with meta data from header lines of lleo data files
]
from relative_module[collections] import module[OrderedDict]
import module[os]
import module[yamlord]
from relative_module[None] import module[utils]
def function[_parse_meta_line, parameter[line]]:
constant[Return key, value pair parsed from data header line]
<ast.Tuple object at 0x7da207f995a0> assign[=] call[call[call[name[line].replace, parameter[constant[:], constant[]]].replace, parameter[constant["], constant[]]].split, parameter[constant[,]]]
return[tuple[[<ast.Call object at 0x7da207f99090>, <ast.Call object at 0x7da207f9b8e0>]]]
def function[_read_meta_all, parameter[f, meta, n_header]]:
constant[Read all meta data from header rows of data file]
call[name[f].seek, parameter[constant[0]]]
variable[_] assign[=] call[name[f].readline, parameter[]]
variable[line] assign[=] call[name[f].readline, parameter[]]
<ast.Tuple object at 0x7da207f9ba30> assign[=] call[name[_parse_meta_line], parameter[name[line]]]
variable[val_ch] assign[=] call[name[utils].posix_string, parameter[name[val_ch]]]
call[call[name[meta]][constant[parameters]]][name[val_ch]] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[_]] in starred[call[name[range], parameter[binary_operation[name[n_header] - constant[2]]]]] begin[:]
variable[line] assign[=] call[name[f].readline, parameter[]]
<ast.Tuple object at 0x7da207f995d0> assign[=] call[name[_parse_meta_line], parameter[name[line]]]
call[call[call[name[meta]][constant[parameters]]][name[val_ch]]][name[key]] assign[=] call[name[val].strip, parameter[]]
return[name[meta]]
def function[_create_meta, parameter[path_dir, tag_model, tag_id]]:
constant[Create meta data dictionary]
import module[datetime]
from relative_module[None] import module[utils]
variable[param_strs] assign[=] call[name[utils].get_tag_params, parameter[name[tag_model]]]
variable[meta] assign[=] call[name[OrderedDict], parameter[]]
variable[exp_name] assign[=] call[call[name[os].path.split, parameter[name[path_dir]]]][constant[1]]
variable[params_tag] assign[=] call[name[utils].parse_experiment_params, parameter[name[exp_name]]]
for taget[tuple[[<ast.Name object at 0x7da1b14704f0>, <ast.Name object at 0x7da1b1470340>]]] in starred[call[name[params_tag].items, parameter[]]] begin[:]
call[name[meta]][name[key]] assign[=] name[value]
variable[fmt] assign[=] constant[%Y-%m-%d %H:%M:%S]
call[name[meta]][constant[date_modified]] assign[=] call[call[name[datetime].datetime.now, parameter[]].strftime, parameter[name[fmt]]]
call[name[meta]][constant[parameters]] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[param_str]] in starred[name[param_strs]] begin[:]
call[name[print], parameter[call[constant[Create meta entry for {}].format, parameter[name[param_str]]]]]
variable[path_file] assign[=] call[name[utils].find_file, parameter[name[path_dir], name[param_str], constant[.TXT]]]
variable[enc] assign[=] call[name[utils].predict_encoding, parameter[name[path_file]]]
with call[name[open], parameter[name[path_file], constant[r]]] begin[:]
variable[n_header] assign[=] call[name[utils].get_n_header, parameter[name[f]]]
call[name[f].seek, parameter[constant[0]]]
variable[meta] assign[=] call[name[_read_meta_all], parameter[name[f], name[meta]]]
return[name[meta]]
variable[meta_yaml_path] assign[=] call[name[os].path.join, parameter[name[path_dir], constant[meta.yml]]]
if call[name[os].path.isfile, parameter[name[meta_yaml_path]]] begin[:]
variable[meta] assign[=] call[name[yamlord].read_yaml, parameter[name[meta_yaml_path]]]
return[name[meta]] | keyword[def] identifier[read_meta] ( identifier[path_dir] , identifier[tag_model] , identifier[tag_id] ):
literal[string]
keyword[from] identifier[collections] keyword[import] identifier[OrderedDict]
keyword[import] identifier[os]
keyword[import] identifier[yamlord]
keyword[from] . keyword[import] identifier[utils]
keyword[def] identifier[_parse_meta_line] ( identifier[line] ):
literal[string]
identifier[key] , identifier[val] = identifier[line] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
keyword[return] identifier[key] . identifier[strip] (), identifier[val] . identifier[strip] ()
keyword[def] identifier[_read_meta_all] ( identifier[f] , identifier[meta] , identifier[n_header] ):
literal[string]
identifier[f] . identifier[seek] ( literal[int] )
identifier[_] = identifier[f] . identifier[readline] ()
identifier[line] = identifier[f] . identifier[readline] ()
identifier[key_ch] , identifier[val_ch] = identifier[_parse_meta_line] ( identifier[line] )
identifier[val_ch] = identifier[utils] . identifier[posix_string] ( identifier[val_ch] )
identifier[meta] [ literal[string] ][ identifier[val_ch] ]= identifier[OrderedDict] ()
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[n_header] - literal[int] ):
identifier[line] = identifier[f] . identifier[readline] ()
identifier[key] , identifier[val] = identifier[_parse_meta_line] ( identifier[line] )
identifier[meta] [ literal[string] ][ identifier[val_ch] ][ identifier[key] ]= identifier[val] . identifier[strip] ()
keyword[return] identifier[meta]
keyword[def] identifier[_create_meta] ( identifier[path_dir] , identifier[tag_model] , identifier[tag_id] ):
literal[string]
keyword[import] identifier[datetime]
keyword[from] . keyword[import] identifier[utils]
identifier[param_strs] = identifier[utils] . identifier[get_tag_params] ( identifier[tag_model] )
identifier[meta] = identifier[OrderedDict] ()
identifier[exp_name] = identifier[os] . identifier[path] . identifier[split] ( identifier[path_dir] )[ literal[int] ]
identifier[params_tag] = identifier[utils] . identifier[parse_experiment_params] ( identifier[exp_name] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[params_tag] . identifier[items] ():
identifier[meta] [ identifier[key] ]= identifier[value]
identifier[fmt] = literal[string]
identifier[meta] [ literal[string] ]= identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[strftime] ( identifier[fmt] )
identifier[meta] [ literal[string] ]= identifier[OrderedDict] ()
keyword[for] identifier[param_str] keyword[in] identifier[param_strs] :
identifier[print] ( literal[string] . identifier[format] ( identifier[param_str] ))
identifier[path_file] = identifier[utils] . identifier[find_file] ( identifier[path_dir] , identifier[param_str] , literal[string] )
identifier[enc] = identifier[utils] . identifier[predict_encoding] ( identifier[path_file] , identifier[n_lines] = literal[int] )
keyword[with] identifier[open] ( identifier[path_file] , literal[string] , identifier[encoding] = identifier[enc] ) keyword[as] identifier[f] :
identifier[n_header] = identifier[utils] . identifier[get_n_header] ( identifier[f] )
identifier[f] . identifier[seek] ( literal[int] )
identifier[meta] = identifier[_read_meta_all] ( identifier[f] , identifier[meta] , identifier[n_header] = identifier[n_header] )
keyword[return] identifier[meta]
identifier[meta_yaml_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path_dir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[meta_yaml_path] ):
identifier[meta] = identifier[yamlord] . identifier[read_yaml] ( identifier[meta_yaml_path] )
keyword[else] :
identifier[meta] = identifier[_create_meta] ( identifier[path_dir] , identifier[tag_model] , identifier[tag_id] )
identifier[yamlord] . identifier[write_yaml] ( identifier[meta] , identifier[meta_yaml_path] )
keyword[return] identifier[meta] | def read_meta(path_dir, tag_model, tag_id):
"""Read meta data from Little Leonardo data header rows
Args
----
path_dir: str
Parent directory containing lleo data files
tag_model: str
Little Leonardo tag model name
tag_id: str, int
Little Leonardo tag ID number
Returns
-------
meta: dict
dictionary with meta data from header lines of lleo data files
"""
from collections import OrderedDict
import os
import yamlord
from . import utils
def _parse_meta_line(line):
"""Return key, value pair parsed from data header line"""
# Parse the key and its value from the line
(key, val) = line.replace(':', '').replace('"', '').split(',')
return (key.strip(), val.strip())
def _read_meta_all(f, meta, n_header):
"""Read all meta data from header rows of data file"""
# Skip 'File name' line
f.seek(0)
_ = f.readline()
# Create child dictionary for channel / file
line = f.readline()
(key_ch, val_ch) = _parse_meta_line(line)
val_ch = utils.posix_string(val_ch)
meta['parameters'][val_ch] = OrderedDict()
# Write header values to channel dict
for _ in range(n_header - 2):
line = f.readline()
(key, val) = _parse_meta_line(line)
meta['parameters'][val_ch][key] = val.strip() # depends on [control=['for'], data=[]]
return meta
def _create_meta(path_dir, tag_model, tag_id):
"""Create meta data dictionary"""
import datetime
from . import utils
param_strs = utils.get_tag_params(tag_model)
# Create dictionary of meta data
meta = OrderedDict()
# Create fields for the parameters in data directory name
exp_name = os.path.split(path_dir)[1]
params_tag = utils.parse_experiment_params(exp_name)
for (key, value) in params_tag.items():
meta[key] = value # depends on [control=['for'], data=[]]
fmt = '%Y-%m-%d %H:%M:%S'
meta['date_modified'] = datetime.datetime.now().strftime(fmt)
meta['parameters'] = OrderedDict()
for param_str in param_strs:
print('Create meta entry for {}'.format(param_str))
path_file = utils.find_file(path_dir, param_str, '.TXT')
# Get number of header rows
enc = utils.predict_encoding(path_file, n_lines=20)
with open(path_file, 'r', encoding=enc) as f:
n_header = utils.get_n_header(f)
f.seek(0)
meta = _read_meta_all(f, meta, n_header=n_header) # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['param_str']]
return meta
# Load meta data from YAML file if it already exists
meta_yaml_path = os.path.join(path_dir, 'meta.yml')
# Load file if exists else create
if os.path.isfile(meta_yaml_path):
meta = yamlord.read_yaml(meta_yaml_path) # depends on [control=['if'], data=[]]
else:
# Else create meta dictionary and save to YAML
meta = _create_meta(path_dir, tag_model, tag_id)
yamlord.write_yaml(meta, meta_yaml_path)
return meta |
def setup_foreground_minifollowups(workflow, coinc_file, single_triggers,
tmpltbank_file, insp_segs, insp_data_name,
insp_anal_name, dax_output, out_dir, tags=None):
""" Create plots that followup the Nth loudest coincident injection
from a statmap produced HDF file.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
coinc_file:
single_triggers: list of pycbc.workflow.File
A list cointaining the file objects associated with the merged
single detector trigger files for each ifo.
tmpltbank_file: pycbc.workflow.File
The file object pointing to the HDF format template bank
insp_segs: SegFile
The segment file containing the data read and analyzed by each inspiral
job.
insp_data_name: str
The name of the segmentlist storing data read.
insp_anal_name: str
The name of the segmentlist storing data analyzed.
out_dir: path
The directory to store minifollowups result plots and files
tags: {None, optional}
Tags to add to the minifollowups executables
Returns
-------
layout: list
A list of tuples which specify the displayed file layout for the
minifollops plots.
"""
logging.info('Entering minifollowups module')
if not workflow.cp.has_section('workflow-minifollowups'):
logging.info('There is no [workflow-minifollowups] section in configuration file')
logging.info('Leaving minifollowups')
return
tags = [] if tags is None else tags
makedir(dax_output)
# turn the config file into a File class
config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'foreground_minifollowup.ini')
workflow.cp.write(open(config_path, 'w'))
config_file = wdax.File(os.path.basename(config_path))
config_file.PFN(urlparse.urljoin('file:', urllib.pathname2url(config_path)),
site='local')
exe = Executable(workflow.cp, 'foreground_minifollowup', ifos=workflow.ifos, out_dir=dax_output)
node = exe.create_node()
node.add_input_opt('--config-files', config_file)
node.add_input_opt('--bank-file', tmpltbank_file)
node.add_input_opt('--statmap-file', coinc_file)
node.add_multiifo_input_list_opt('--single-detector-triggers', single_triggers)
node.add_input_opt('--inspiral-segments', insp_segs)
node.add_opt('--inspiral-data-read-name', insp_data_name)
node.add_opt('--inspiral-data-analyzed-name', insp_anal_name)
node.new_output_file_opt(workflow.analysis_time, '.dax', '--output-file', tags=tags)
node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map', tags=tags)
node.new_output_file_opt(workflow.analysis_time, '.tc.txt', '--transformation-catalog', tags=tags)
name = node.output_files[0].name
map_file = node.output_files[1]
tc_file = node.output_files[2]
node.add_opt('--workflow-name', name)
node.add_opt('--output-dir', out_dir)
workflow += node
# execute this in a sub-workflow
fil = node.output_files[0]
# determine if a staging site has been specified
try:
staging_site = workflow.cp.get('workflow-foreground_minifollowups',
'staging-site')
except:
staging_site = None
job = dax.DAX(fil)
job.addArguments('--basename %s' % os.path.splitext(os.path.basename(name))[0])
Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site)
workflow._adag.addJob(job)
dep = dax.Dependency(parent=node._dax_node, child=job)
workflow._adag.addDependency(dep)
logging.info('Leaving minifollowups module') | def function[setup_foreground_minifollowups, parameter[workflow, coinc_file, single_triggers, tmpltbank_file, insp_segs, insp_data_name, insp_anal_name, dax_output, out_dir, tags]]:
constant[ Create plots that followup the Nth loudest coincident injection
from a statmap produced HDF file.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
coinc_file:
single_triggers: list of pycbc.workflow.File
A list cointaining the file objects associated with the merged
single detector trigger files for each ifo.
tmpltbank_file: pycbc.workflow.File
The file object pointing to the HDF format template bank
insp_segs: SegFile
The segment file containing the data read and analyzed by each inspiral
job.
insp_data_name: str
The name of the segmentlist storing data read.
insp_anal_name: str
The name of the segmentlist storing data analyzed.
out_dir: path
The directory to store minifollowups result plots and files
tags: {None, optional}
Tags to add to the minifollowups executables
Returns
-------
layout: list
A list of tuples which specify the displayed file layout for the
minifollops plots.
]
call[name[logging].info, parameter[constant[Entering minifollowups module]]]
if <ast.UnaryOp object at 0x7da2054a7bb0> begin[:]
call[name[logging].info, parameter[constant[There is no [workflow-minifollowups] section in configuration file]]]
call[name[logging].info, parameter[constant[Leaving minifollowups]]]
return[None]
variable[tags] assign[=] <ast.IfExp object at 0x7da2054a75e0>
call[name[makedir], parameter[name[dax_output]]]
variable[config_path] assign[=] call[name[os].path.abspath, parameter[binary_operation[binary_operation[binary_operation[name[dax_output] + constant[/]] + call[constant[_].join, parameter[name[tags]]]] + constant[foreground_minifollowup.ini]]]]
call[name[workflow].cp.write, parameter[call[name[open], parameter[name[config_path], constant[w]]]]]
variable[config_file] assign[=] call[name[wdax].File, parameter[call[name[os].path.basename, parameter[name[config_path]]]]]
call[name[config_file].PFN, parameter[call[name[urlparse].urljoin, parameter[constant[file:], call[name[urllib].pathname2url, parameter[name[config_path]]]]]]]
variable[exe] assign[=] call[name[Executable], parameter[name[workflow].cp, constant[foreground_minifollowup]]]
variable[node] assign[=] call[name[exe].create_node, parameter[]]
call[name[node].add_input_opt, parameter[constant[--config-files], name[config_file]]]
call[name[node].add_input_opt, parameter[constant[--bank-file], name[tmpltbank_file]]]
call[name[node].add_input_opt, parameter[constant[--statmap-file], name[coinc_file]]]
call[name[node].add_multiifo_input_list_opt, parameter[constant[--single-detector-triggers], name[single_triggers]]]
call[name[node].add_input_opt, parameter[constant[--inspiral-segments], name[insp_segs]]]
call[name[node].add_opt, parameter[constant[--inspiral-data-read-name], name[insp_data_name]]]
call[name[node].add_opt, parameter[constant[--inspiral-data-analyzed-name], name[insp_anal_name]]]
call[name[node].new_output_file_opt, parameter[name[workflow].analysis_time, constant[.dax], constant[--output-file]]]
call[name[node].new_output_file_opt, parameter[name[workflow].analysis_time, constant[.dax.map], constant[--output-map]]]
call[name[node].new_output_file_opt, parameter[name[workflow].analysis_time, constant[.tc.txt], constant[--transformation-catalog]]]
variable[name] assign[=] call[name[node].output_files][constant[0]].name
variable[map_file] assign[=] call[name[node].output_files][constant[1]]
variable[tc_file] assign[=] call[name[node].output_files][constant[2]]
call[name[node].add_opt, parameter[constant[--workflow-name], name[name]]]
call[name[node].add_opt, parameter[constant[--output-dir], name[out_dir]]]
<ast.AugAssign object at 0x7da20e963e20>
variable[fil] assign[=] call[name[node].output_files][constant[0]]
<ast.Try object at 0x7da20e9623e0>
variable[job] assign[=] call[name[dax].DAX, parameter[name[fil]]]
call[name[job].addArguments, parameter[binary_operation[constant[--basename %s] <ast.Mod object at 0x7da2590d6920> call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[name]]]]]][constant[0]]]]]
call[name[Workflow].set_job_properties, parameter[name[job], name[map_file], name[tc_file]]]
call[name[workflow]._adag.addJob, parameter[name[job]]]
variable[dep] assign[=] call[name[dax].Dependency, parameter[]]
call[name[workflow]._adag.addDependency, parameter[name[dep]]]
call[name[logging].info, parameter[constant[Leaving minifollowups module]]] | keyword[def] identifier[setup_foreground_minifollowups] ( identifier[workflow] , identifier[coinc_file] , identifier[single_triggers] ,
identifier[tmpltbank_file] , identifier[insp_segs] , identifier[insp_data_name] ,
identifier[insp_anal_name] , identifier[dax_output] , identifier[out_dir] , identifier[tags] = keyword[None] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
keyword[if] keyword[not] identifier[workflow] . identifier[cp] . identifier[has_section] ( literal[string] ):
identifier[logging] . identifier[info] ( literal[string] )
identifier[logging] . identifier[info] ( literal[string] )
keyword[return]
identifier[tags] =[] keyword[if] identifier[tags] keyword[is] keyword[None] keyword[else] identifier[tags]
identifier[makedir] ( identifier[dax_output] )
identifier[config_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[dax_output] + literal[string] + literal[string] . identifier[join] ( identifier[tags] )+ literal[string] )
identifier[workflow] . identifier[cp] . identifier[write] ( identifier[open] ( identifier[config_path] , literal[string] ))
identifier[config_file] = identifier[wdax] . identifier[File] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[config_path] ))
identifier[config_file] . identifier[PFN] ( identifier[urlparse] . identifier[urljoin] ( literal[string] , identifier[urllib] . identifier[pathname2url] ( identifier[config_path] )),
identifier[site] = literal[string] )
identifier[exe] = identifier[Executable] ( identifier[workflow] . identifier[cp] , literal[string] , identifier[ifos] = identifier[workflow] . identifier[ifos] , identifier[out_dir] = identifier[dax_output] )
identifier[node] = identifier[exe] . identifier[create_node] ()
identifier[node] . identifier[add_input_opt] ( literal[string] , identifier[config_file] )
identifier[node] . identifier[add_input_opt] ( literal[string] , identifier[tmpltbank_file] )
identifier[node] . identifier[add_input_opt] ( literal[string] , identifier[coinc_file] )
identifier[node] . identifier[add_multiifo_input_list_opt] ( literal[string] , identifier[single_triggers] )
identifier[node] . identifier[add_input_opt] ( literal[string] , identifier[insp_segs] )
identifier[node] . identifier[add_opt] ( literal[string] , identifier[insp_data_name] )
identifier[node] . identifier[add_opt] ( literal[string] , identifier[insp_anal_name] )
identifier[node] . identifier[new_output_file_opt] ( identifier[workflow] . identifier[analysis_time] , literal[string] , literal[string] , identifier[tags] = identifier[tags] )
identifier[node] . identifier[new_output_file_opt] ( identifier[workflow] . identifier[analysis_time] , literal[string] , literal[string] , identifier[tags] = identifier[tags] )
identifier[node] . identifier[new_output_file_opt] ( identifier[workflow] . identifier[analysis_time] , literal[string] , literal[string] , identifier[tags] = identifier[tags] )
identifier[name] = identifier[node] . identifier[output_files] [ literal[int] ]. identifier[name]
identifier[map_file] = identifier[node] . identifier[output_files] [ literal[int] ]
identifier[tc_file] = identifier[node] . identifier[output_files] [ literal[int] ]
identifier[node] . identifier[add_opt] ( literal[string] , identifier[name] )
identifier[node] . identifier[add_opt] ( literal[string] , identifier[out_dir] )
identifier[workflow] += identifier[node]
identifier[fil] = identifier[node] . identifier[output_files] [ literal[int] ]
keyword[try] :
identifier[staging_site] = identifier[workflow] . identifier[cp] . identifier[get] ( literal[string] ,
literal[string] )
keyword[except] :
identifier[staging_site] = keyword[None]
identifier[job] = identifier[dax] . identifier[DAX] ( identifier[fil] )
identifier[job] . identifier[addArguments] ( literal[string] % identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[name] ))[ literal[int] ])
identifier[Workflow] . identifier[set_job_properties] ( identifier[job] , identifier[map_file] , identifier[tc_file] , identifier[staging_site] = identifier[staging_site] )
identifier[workflow] . identifier[_adag] . identifier[addJob] ( identifier[job] )
identifier[dep] = identifier[dax] . identifier[Dependency] ( identifier[parent] = identifier[node] . identifier[_dax_node] , identifier[child] = identifier[job] )
identifier[workflow] . identifier[_adag] . identifier[addDependency] ( identifier[dep] )
identifier[logging] . identifier[info] ( literal[string] ) | def setup_foreground_minifollowups(workflow, coinc_file, single_triggers, tmpltbank_file, insp_segs, insp_data_name, insp_anal_name, dax_output, out_dir, tags=None):
""" Create plots that followup the Nth loudest coincident injection
from a statmap produced HDF file.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
coinc_file:
single_triggers: list of pycbc.workflow.File
A list cointaining the file objects associated with the merged
single detector trigger files for each ifo.
tmpltbank_file: pycbc.workflow.File
The file object pointing to the HDF format template bank
insp_segs: SegFile
The segment file containing the data read and analyzed by each inspiral
job.
insp_data_name: str
The name of the segmentlist storing data read.
insp_anal_name: str
The name of the segmentlist storing data analyzed.
out_dir: path
The directory to store minifollowups result plots and files
tags: {None, optional}
Tags to add to the minifollowups executables
Returns
-------
layout: list
A list of tuples which specify the displayed file layout for the
minifollops plots.
"""
logging.info('Entering minifollowups module')
if not workflow.cp.has_section('workflow-minifollowups'):
logging.info('There is no [workflow-minifollowups] section in configuration file')
logging.info('Leaving minifollowups')
return # depends on [control=['if'], data=[]]
tags = [] if tags is None else tags
makedir(dax_output)
# turn the config file into a File class
config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'foreground_minifollowup.ini')
workflow.cp.write(open(config_path, 'w'))
config_file = wdax.File(os.path.basename(config_path))
config_file.PFN(urlparse.urljoin('file:', urllib.pathname2url(config_path)), site='local')
exe = Executable(workflow.cp, 'foreground_minifollowup', ifos=workflow.ifos, out_dir=dax_output)
node = exe.create_node()
node.add_input_opt('--config-files', config_file)
node.add_input_opt('--bank-file', tmpltbank_file)
node.add_input_opt('--statmap-file', coinc_file)
node.add_multiifo_input_list_opt('--single-detector-triggers', single_triggers)
node.add_input_opt('--inspiral-segments', insp_segs)
node.add_opt('--inspiral-data-read-name', insp_data_name)
node.add_opt('--inspiral-data-analyzed-name', insp_anal_name)
node.new_output_file_opt(workflow.analysis_time, '.dax', '--output-file', tags=tags)
node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map', tags=tags)
node.new_output_file_opt(workflow.analysis_time, '.tc.txt', '--transformation-catalog', tags=tags)
name = node.output_files[0].name
map_file = node.output_files[1]
tc_file = node.output_files[2]
node.add_opt('--workflow-name', name)
node.add_opt('--output-dir', out_dir)
workflow += node
# execute this in a sub-workflow
fil = node.output_files[0]
# determine if a staging site has been specified
try:
staging_site = workflow.cp.get('workflow-foreground_minifollowups', 'staging-site') # depends on [control=['try'], data=[]]
except:
staging_site = None # depends on [control=['except'], data=[]]
job = dax.DAX(fil)
job.addArguments('--basename %s' % os.path.splitext(os.path.basename(name))[0])
Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site)
workflow._adag.addJob(job)
dep = dax.Dependency(parent=node._dax_node, child=job)
workflow._adag.addDependency(dep)
logging.info('Leaving minifollowups module') |
def sample(dataset, target, tolerance=None, pass_cell_arrays=True,
pass_point_arrays=True):
"""Resample scalar data from a passed mesh onto this mesh using
:class:`vtk.vtkResampleWithDataSet`.
Parameters
----------
dataset: vtki.Common
The source vtk data object as the mesh to sample values on to
target: vtki.Common
The vtk data object to sample from - point and cell arrays from
this object are sampled onto the nodes of the ``dataset`` mesh
tolerance: flaot, optional
tolerance used to compute whether a point in the source is in a
cell of the input. If not given, tolerance automatically generated.
pass_cell_arrays: bool, optional
Preserve source mesh's original cell data arrays
pass_point_arrays: bool, optional
Preserve source mesh's original point data arrays
"""
alg = vtk.vtkResampleWithDataSet() # Construct the ResampleWithDataSet object
alg.SetInputData(dataset) # Set the Input data (actually the source i.e. where to sample from)
alg.SetSourceData(target) # Set the Source data (actually the target, i.e. where to sample to)
alg.SetPassCellArrays(pass_cell_arrays)
alg.SetPassPointArrays(pass_point_arrays)
if tolerance is not None:
alg.SetComputeTolerance(False)
alg.SetTolerance(tolerance)
alg.Update() # Perfrom the resampling
return _get_output(alg) | def function[sample, parameter[dataset, target, tolerance, pass_cell_arrays, pass_point_arrays]]:
constant[Resample scalar data from a passed mesh onto this mesh using
:class:`vtk.vtkResampleWithDataSet`.
Parameters
----------
dataset: vtki.Common
The source vtk data object as the mesh to sample values on to
target: vtki.Common
The vtk data object to sample from - point and cell arrays from
this object are sampled onto the nodes of the ``dataset`` mesh
tolerance: flaot, optional
tolerance used to compute whether a point in the source is in a
cell of the input. If not given, tolerance automatically generated.
pass_cell_arrays: bool, optional
Preserve source mesh's original cell data arrays
pass_point_arrays: bool, optional
Preserve source mesh's original point data arrays
]
variable[alg] assign[=] call[name[vtk].vtkResampleWithDataSet, parameter[]]
call[name[alg].SetInputData, parameter[name[dataset]]]
call[name[alg].SetSourceData, parameter[name[target]]]
call[name[alg].SetPassCellArrays, parameter[name[pass_cell_arrays]]]
call[name[alg].SetPassPointArrays, parameter[name[pass_point_arrays]]]
if compare[name[tolerance] is_not constant[None]] begin[:]
call[name[alg].SetComputeTolerance, parameter[constant[False]]]
call[name[alg].SetTolerance, parameter[name[tolerance]]]
call[name[alg].Update, parameter[]]
return[call[name[_get_output], parameter[name[alg]]]] | keyword[def] identifier[sample] ( identifier[dataset] , identifier[target] , identifier[tolerance] = keyword[None] , identifier[pass_cell_arrays] = keyword[True] ,
identifier[pass_point_arrays] = keyword[True] ):
literal[string]
identifier[alg] = identifier[vtk] . identifier[vtkResampleWithDataSet] ()
identifier[alg] . identifier[SetInputData] ( identifier[dataset] )
identifier[alg] . identifier[SetSourceData] ( identifier[target] )
identifier[alg] . identifier[SetPassCellArrays] ( identifier[pass_cell_arrays] )
identifier[alg] . identifier[SetPassPointArrays] ( identifier[pass_point_arrays] )
keyword[if] identifier[tolerance] keyword[is] keyword[not] keyword[None] :
identifier[alg] . identifier[SetComputeTolerance] ( keyword[False] )
identifier[alg] . identifier[SetTolerance] ( identifier[tolerance] )
identifier[alg] . identifier[Update] ()
keyword[return] identifier[_get_output] ( identifier[alg] ) | def sample(dataset, target, tolerance=None, pass_cell_arrays=True, pass_point_arrays=True):
"""Resample scalar data from a passed mesh onto this mesh using
:class:`vtk.vtkResampleWithDataSet`.
Parameters
----------
dataset: vtki.Common
The source vtk data object as the mesh to sample values on to
target: vtki.Common
The vtk data object to sample from - point and cell arrays from
this object are sampled onto the nodes of the ``dataset`` mesh
tolerance: flaot, optional
tolerance used to compute whether a point in the source is in a
cell of the input. If not given, tolerance automatically generated.
pass_cell_arrays: bool, optional
Preserve source mesh's original cell data arrays
pass_point_arrays: bool, optional
Preserve source mesh's original point data arrays
"""
alg = vtk.vtkResampleWithDataSet() # Construct the ResampleWithDataSet object
alg.SetInputData(dataset) # Set the Input data (actually the source i.e. where to sample from)
alg.SetSourceData(target) # Set the Source data (actually the target, i.e. where to sample to)
alg.SetPassCellArrays(pass_cell_arrays)
alg.SetPassPointArrays(pass_point_arrays)
if tolerance is not None:
alg.SetComputeTolerance(False)
alg.SetTolerance(tolerance) # depends on [control=['if'], data=['tolerance']]
alg.Update() # Perfrom the resampling
return _get_output(alg) |
def tokenize_punctuation_command(text):
"""Process command that augments or modifies punctuation.
This is important to the tokenization of a string, as opening or closing
punctuation is not supposed to match.
:param Buffer text: iterator over text, with current position
"""
if text.peek() == '\\':
for point in PUNCTUATION_COMMANDS:
if text.peek((1, len(point) + 1)) == point:
return text.forward(len(point) + 1) | def function[tokenize_punctuation_command, parameter[text]]:
constant[Process command that augments or modifies punctuation.
This is important to the tokenization of a string, as opening or closing
punctuation is not supposed to match.
:param Buffer text: iterator over text, with current position
]
if compare[call[name[text].peek, parameter[]] equal[==] constant[\]] begin[:]
for taget[name[point]] in starred[name[PUNCTUATION_COMMANDS]] begin[:]
if compare[call[name[text].peek, parameter[tuple[[<ast.Constant object at 0x7da1b0792d10>, <ast.BinOp object at 0x7da1b0790700>]]]] equal[==] name[point]] begin[:]
return[call[name[text].forward, parameter[binary_operation[call[name[len], parameter[name[point]]] + constant[1]]]]] | keyword[def] identifier[tokenize_punctuation_command] ( identifier[text] ):
literal[string]
keyword[if] identifier[text] . identifier[peek] ()== literal[string] :
keyword[for] identifier[point] keyword[in] identifier[PUNCTUATION_COMMANDS] :
keyword[if] identifier[text] . identifier[peek] (( literal[int] , identifier[len] ( identifier[point] )+ literal[int] ))== identifier[point] :
keyword[return] identifier[text] . identifier[forward] ( identifier[len] ( identifier[point] )+ literal[int] ) | def tokenize_punctuation_command(text):
"""Process command that augments or modifies punctuation.
This is important to the tokenization of a string, as opening or closing
punctuation is not supposed to match.
:param Buffer text: iterator over text, with current position
"""
if text.peek() == '\\':
for point in PUNCTUATION_COMMANDS:
if text.peek((1, len(point) + 1)) == point:
return text.forward(len(point) + 1) # depends on [control=['if'], data=['point']] # depends on [control=['for'], data=['point']] # depends on [control=['if'], data=[]] |
def is_subset(a, b):
"""Excluding same size"""
return b.left <= a.left and b.right > a.right or b.left < a.left and b.right >= a.right | def function[is_subset, parameter[a, b]]:
constant[Excluding same size]
return[<ast.BoolOp object at 0x7da20c76c610>] | keyword[def] identifier[is_subset] ( identifier[a] , identifier[b] ):
literal[string]
keyword[return] identifier[b] . identifier[left] <= identifier[a] . identifier[left] keyword[and] identifier[b] . identifier[right] > identifier[a] . identifier[right] keyword[or] identifier[b] . identifier[left] < identifier[a] . identifier[left] keyword[and] identifier[b] . identifier[right] >= identifier[a] . identifier[right] | def is_subset(a, b):
"""Excluding same size"""
return b.left <= a.left and b.right > a.right or (b.left < a.left and b.right >= a.right) |
def main():
"""Entry point for remove_template."""
# Wrap sys stdout for python 2, so print can understand unicode.
if sys.version_info[0] < 3:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
options = docopt.docopt(__doc__,
help=True,
version='template_remover v%s' % __VERSION__)
print(template_remover.clean(io.open(options['FILENAME']).read()))
return 0 | def function[main, parameter[]]:
constant[Entry point for remove_template.]
if compare[call[name[sys].version_info][constant[0]] less[<] constant[3]] begin[:]
name[sys].stdout assign[=] call[call[name[codecs].getwriter, parameter[constant[utf-8]]], parameter[name[sys].stdout]]
variable[options] assign[=] call[name[docopt].docopt, parameter[name[__doc__]]]
call[name[print], parameter[call[name[template_remover].clean, parameter[call[call[name[io].open, parameter[call[name[options]][constant[FILENAME]]]].read, parameter[]]]]]]
return[constant[0]] | keyword[def] identifier[main] ():
literal[string]
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]< literal[int] :
identifier[sys] . identifier[stdout] = identifier[codecs] . identifier[getwriter] ( literal[string] )( identifier[sys] . identifier[stdout] )
identifier[options] = identifier[docopt] . identifier[docopt] ( identifier[__doc__] ,
identifier[help] = keyword[True] ,
identifier[version] = literal[string] % identifier[__VERSION__] )
identifier[print] ( identifier[template_remover] . identifier[clean] ( identifier[io] . identifier[open] ( identifier[options] [ literal[string] ]). identifier[read] ()))
keyword[return] literal[int] | def main():
"""Entry point for remove_template."""
# Wrap sys stdout for python 2, so print can understand unicode.
if sys.version_info[0] < 3:
sys.stdout = codecs.getwriter('utf-8')(sys.stdout) # depends on [control=['if'], data=[]]
options = docopt.docopt(__doc__, help=True, version='template_remover v%s' % __VERSION__)
print(template_remover.clean(io.open(options['FILENAME']).read()))
return 0 |
def insert_attachments(self, volumeID, attachments):
''' add attachments to an already existing volume '''
log.debug("adding new attachments to volume '{}': {}".format(volumeID, attachments))
if not attachments:
return
rawVolume = self._req_raw_volume(volumeID)
attsID = list()
for index, a in enumerate(attachments):
try:
rawAttachment = self._assemble_attachment(a['file'], a)
rawVolume['_source']['_attachments'].append(rawAttachment)
attsID.append(rawAttachment['id'])
except Exception:
log.exception("Error while elaborating attachments array at index: {}".format(index))
raise
self._db.modify_book(volumeID, rawVolume['_source'], version=rawVolume['_version'])
return attsID | def function[insert_attachments, parameter[self, volumeID, attachments]]:
constant[ add attachments to an already existing volume ]
call[name[log].debug, parameter[call[constant[adding new attachments to volume '{}': {}].format, parameter[name[volumeID], name[attachments]]]]]
if <ast.UnaryOp object at 0x7da1b2667490> begin[:]
return[None]
variable[rawVolume] assign[=] call[name[self]._req_raw_volume, parameter[name[volumeID]]]
variable[attsID] assign[=] call[name[list], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b2666530>, <ast.Name object at 0x7da1b2666650>]]] in starred[call[name[enumerate], parameter[name[attachments]]]] begin[:]
<ast.Try object at 0x7da1b2666110>
call[name[self]._db.modify_book, parameter[name[volumeID], call[name[rawVolume]][constant[_source]]]]
return[name[attsID]] | keyword[def] identifier[insert_attachments] ( identifier[self] , identifier[volumeID] , identifier[attachments] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[volumeID] , identifier[attachments] ))
keyword[if] keyword[not] identifier[attachments] :
keyword[return]
identifier[rawVolume] = identifier[self] . identifier[_req_raw_volume] ( identifier[volumeID] )
identifier[attsID] = identifier[list] ()
keyword[for] identifier[index] , identifier[a] keyword[in] identifier[enumerate] ( identifier[attachments] ):
keyword[try] :
identifier[rawAttachment] = identifier[self] . identifier[_assemble_attachment] ( identifier[a] [ literal[string] ], identifier[a] )
identifier[rawVolume] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[rawAttachment] )
identifier[attsID] . identifier[append] ( identifier[rawAttachment] [ literal[string] ])
keyword[except] identifier[Exception] :
identifier[log] . identifier[exception] ( literal[string] . identifier[format] ( identifier[index] ))
keyword[raise]
identifier[self] . identifier[_db] . identifier[modify_book] ( identifier[volumeID] , identifier[rawVolume] [ literal[string] ], identifier[version] = identifier[rawVolume] [ literal[string] ])
keyword[return] identifier[attsID] | def insert_attachments(self, volumeID, attachments):
""" add attachments to an already existing volume """
log.debug("adding new attachments to volume '{}': {}".format(volumeID, attachments))
if not attachments:
return # depends on [control=['if'], data=[]]
rawVolume = self._req_raw_volume(volumeID)
attsID = list()
for (index, a) in enumerate(attachments):
try:
rawAttachment = self._assemble_attachment(a['file'], a)
rawVolume['_source']['_attachments'].append(rawAttachment)
attsID.append(rawAttachment['id']) # depends on [control=['try'], data=[]]
except Exception:
log.exception('Error while elaborating attachments array at index: {}'.format(index))
raise # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
self._db.modify_book(volumeID, rawVolume['_source'], version=rawVolume['_version'])
return attsID |
def parse_epsv_response(s):
"""
Parsing `EPSV` (`message (|||port|)`) response.
:param s: response line
:type s: :py:class:`str`
:return: (ip, port)
:rtype: (:py:class:`None`, :py:class:`int`)
"""
matches = tuple(re.finditer(r"\((.)\1\1\d+\1\)", s))
s = matches[-1].group()
port = int(s[4:-2])
return None, port | def function[parse_epsv_response, parameter[s]]:
constant[
Parsing `EPSV` (`message (|||port|)`) response.
:param s: response line
:type s: :py:class:`str`
:return: (ip, port)
:rtype: (:py:class:`None`, :py:class:`int`)
]
variable[matches] assign[=] call[name[tuple], parameter[call[name[re].finditer, parameter[constant[\((.)\1\1\d+\1\)], name[s]]]]]
variable[s] assign[=] call[call[name[matches]][<ast.UnaryOp object at 0x7da1b00b4190>].group, parameter[]]
variable[port] assign[=] call[name[int], parameter[call[name[s]][<ast.Slice object at 0x7da1b00b5b70>]]]
return[tuple[[<ast.Constant object at 0x7da1b00b49a0>, <ast.Name object at 0x7da1b00b4dc0>]]] | keyword[def] identifier[parse_epsv_response] ( identifier[s] ):
literal[string]
identifier[matches] = identifier[tuple] ( identifier[re] . identifier[finditer] ( literal[string] , identifier[s] ))
identifier[s] = identifier[matches] [- literal[int] ]. identifier[group] ()
identifier[port] = identifier[int] ( identifier[s] [ literal[int] :- literal[int] ])
keyword[return] keyword[None] , identifier[port] | def parse_epsv_response(s):
"""
Parsing `EPSV` (`message (|||port|)`) response.
:param s: response line
:type s: :py:class:`str`
:return: (ip, port)
:rtype: (:py:class:`None`, :py:class:`int`)
"""
matches = tuple(re.finditer('\\((.)\\1\\1\\d+\\1\\)', s))
s = matches[-1].group()
port = int(s[4:-2])
return (None, port) |
def _trigger_timers(self):
"""Triggers expired timers"""
current = time.time()
while len(self.timer_tasks) > 0 and (self.timer_tasks[0][0] - current <= 0):
task = heappop(self.timer_tasks)[1]
task() | def function[_trigger_timers, parameter[self]]:
constant[Triggers expired timers]
variable[current] assign[=] call[name[time].time, parameter[]]
while <ast.BoolOp object at 0x7da18dc99750> begin[:]
variable[task] assign[=] call[call[name[heappop], parameter[name[self].timer_tasks]]][constant[1]]
call[name[task], parameter[]] | keyword[def] identifier[_trigger_timers] ( identifier[self] ):
literal[string]
identifier[current] = identifier[time] . identifier[time] ()
keyword[while] identifier[len] ( identifier[self] . identifier[timer_tasks] )> literal[int] keyword[and] ( identifier[self] . identifier[timer_tasks] [ literal[int] ][ literal[int] ]- identifier[current] <= literal[int] ):
identifier[task] = identifier[heappop] ( identifier[self] . identifier[timer_tasks] )[ literal[int] ]
identifier[task] () | def _trigger_timers(self):
"""Triggers expired timers"""
current = time.time()
while len(self.timer_tasks) > 0 and self.timer_tasks[0][0] - current <= 0:
task = heappop(self.timer_tasks)[1]
task() # depends on [control=['while'], data=[]] |
def _make_opt_list(opts, group):
"""Generate a list of tuple containing group, options
:param opts: option lists associated with a group
:type opts: list
:param group: name of an option group
:type group: str
:return: a list of (group_name, opts) tuples
:rtype: list
"""
import copy
import itertools
_opts = [(group, list(itertools.chain(*opts)))]
return [(g, copy.deepcopy(o)) for g, o in _opts] | def function[_make_opt_list, parameter[opts, group]]:
constant[Generate a list of tuple containing group, options
:param opts: option lists associated with a group
:type opts: list
:param group: name of an option group
:type group: str
:return: a list of (group_name, opts) tuples
:rtype: list
]
import module[copy]
import module[itertools]
variable[_opts] assign[=] list[[<ast.Tuple object at 0x7da1b141a1a0>]]
return[<ast.ListComp object at 0x7da1b141ad40>] | keyword[def] identifier[_make_opt_list] ( identifier[opts] , identifier[group] ):
literal[string]
keyword[import] identifier[copy]
keyword[import] identifier[itertools]
identifier[_opts] =[( identifier[group] , identifier[list] ( identifier[itertools] . identifier[chain] (* identifier[opts] )))]
keyword[return] [( identifier[g] , identifier[copy] . identifier[deepcopy] ( identifier[o] )) keyword[for] identifier[g] , identifier[o] keyword[in] identifier[_opts] ] | def _make_opt_list(opts, group):
"""Generate a list of tuple containing group, options
:param opts: option lists associated with a group
:type opts: list
:param group: name of an option group
:type group: str
:return: a list of (group_name, opts) tuples
:rtype: list
"""
import copy
import itertools
_opts = [(group, list(itertools.chain(*opts)))]
return [(g, copy.deepcopy(o)) for (g, o) in _opts] |
def __Script_Editor_Output_plainTextEdit_refresh_ui(self):
"""
Updates the **Script_Editor_Output_plainTextEdit** Widget.
"""
memory_handler_stack_depth = len(self.__engine.logging_session_handler_stream.stream)
if memory_handler_stack_depth != self.__memory_handler_stack_depth:
for line in self.__engine.logging_session_handler_stream.stream[
self.__memory_handler_stack_depth:memory_handler_stack_depth]:
self.Script_Editor_Output_plainTextEdit.moveCursor(QTextCursor.End)
self.Script_Editor_Output_plainTextEdit.insertPlainText(line)
self.__Script_Editor_Output_plainTextEdit_set_default_view_state()
self.__memory_handler_stack_depth = memory_handler_stack_depth | def function[__Script_Editor_Output_plainTextEdit_refresh_ui, parameter[self]]:
constant[
Updates the **Script_Editor_Output_plainTextEdit** Widget.
]
variable[memory_handler_stack_depth] assign[=] call[name[len], parameter[name[self].__engine.logging_session_handler_stream.stream]]
if compare[name[memory_handler_stack_depth] not_equal[!=] name[self].__memory_handler_stack_depth] begin[:]
for taget[name[line]] in starred[call[name[self].__engine.logging_session_handler_stream.stream][<ast.Slice object at 0x7da1b0966590>]] begin[:]
call[name[self].Script_Editor_Output_plainTextEdit.moveCursor, parameter[name[QTextCursor].End]]
call[name[self].Script_Editor_Output_plainTextEdit.insertPlainText, parameter[name[line]]]
call[name[self].__Script_Editor_Output_plainTextEdit_set_default_view_state, parameter[]]
name[self].__memory_handler_stack_depth assign[=] name[memory_handler_stack_depth] | keyword[def] identifier[__Script_Editor_Output_plainTextEdit_refresh_ui] ( identifier[self] ):
literal[string]
identifier[memory_handler_stack_depth] = identifier[len] ( identifier[self] . identifier[__engine] . identifier[logging_session_handler_stream] . identifier[stream] )
keyword[if] identifier[memory_handler_stack_depth] != identifier[self] . identifier[__memory_handler_stack_depth] :
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[__engine] . identifier[logging_session_handler_stream] . identifier[stream] [
identifier[self] . identifier[__memory_handler_stack_depth] : identifier[memory_handler_stack_depth] ]:
identifier[self] . identifier[Script_Editor_Output_plainTextEdit] . identifier[moveCursor] ( identifier[QTextCursor] . identifier[End] )
identifier[self] . identifier[Script_Editor_Output_plainTextEdit] . identifier[insertPlainText] ( identifier[line] )
identifier[self] . identifier[__Script_Editor_Output_plainTextEdit_set_default_view_state] ()
identifier[self] . identifier[__memory_handler_stack_depth] = identifier[memory_handler_stack_depth] | def __Script_Editor_Output_plainTextEdit_refresh_ui(self):
"""
Updates the **Script_Editor_Output_plainTextEdit** Widget.
"""
memory_handler_stack_depth = len(self.__engine.logging_session_handler_stream.stream)
if memory_handler_stack_depth != self.__memory_handler_stack_depth:
for line in self.__engine.logging_session_handler_stream.stream[self.__memory_handler_stack_depth:memory_handler_stack_depth]:
self.Script_Editor_Output_plainTextEdit.moveCursor(QTextCursor.End)
self.Script_Editor_Output_plainTextEdit.insertPlainText(line) # depends on [control=['for'], data=['line']]
self.__Script_Editor_Output_plainTextEdit_set_default_view_state()
self.__memory_handler_stack_depth = memory_handler_stack_depth # depends on [control=['if'], data=['memory_handler_stack_depth']] |
def save(self, directory, parameters='all'):
"""
Saves results to disk.
Depending on which results are selected and if they exist, the
following directories and files are created:
* `powerflow_results` directory
* `voltages_pu.csv`
See :py:attr:`~pfa_v_mag_pu` for more information.
* `currents.csv`
See :func:`~i_res` for more information.
* `active_powers.csv`
See :py:attr:`~pfa_p` for more information.
* `reactive_powers.csv`
See :py:attr:`~pfa_q` for more information.
* `apparent_powers.csv`
See :func:`~s_res` for more information.
* `grid_losses.csv`
See :py:attr:`~grid_losses` for more information.
* `hv_mv_exchanges.csv`
See :py:attr:`~hv_mv_exchanges` for more information.
* `pypsa_network` directory
See :py:func:`pypsa.Network.export_to_csv_folder`
* `grid_expansion_results` directory
* `grid_expansion_costs.csv`
See :py:attr:`~grid_expansion_costs` for more information.
* `equipment_changes.csv`
See :py:attr:`~equipment_changes` for more information.
* `unresolved_issues.csv`
See :py:attr:`~unresolved_issues` for more information.
* `curtailment_results` directory
Files depend on curtailment specifications. There will be one file
for each curtailment specification, that is for every key in
:py:attr:`~curtailment` dictionary.
* `storage_integration_results` directory
* `storages.csv`
See :func:`~storages` for more information.
Parameters
----------
directory : :obj:`str`
Directory to save the results in.
parameters : :obj:`str` or :obj:`list` of :obj:`str`
Specifies which results will be saved. By default all results are
saved. To only save certain results set `parameters` to one of the
following options or choose several options by providing a list:
* 'pypsa_network'
* 'powerflow_results'
* 'grid_expansion_results'
* 'curtailment_results'
* 'storage_integration_results'
"""
def _save_power_flow_results(target_dir):
if self.pfa_v_mag_pu is not None:
# create directory
os.makedirs(target_dir, exist_ok=True)
# voltage
self.pfa_v_mag_pu.to_csv(
os.path.join(target_dir, 'voltages_pu.csv'))
# current
self.i_res.to_csv(
os.path.join(target_dir, 'currents.csv'))
# active power
self.pfa_p.to_csv(
os.path.join(target_dir, 'active_powers.csv'))
# reactive power
self.pfa_q.to_csv(
os.path.join(target_dir, 'reactive_powers.csv'))
# apparent power
self.s_res().to_csv(
os.path.join(target_dir, 'apparent_powers.csv'))
# grid losses
self.grid_losses.to_csv(
os.path.join(target_dir, 'grid_losses.csv'))
# grid exchanges
self.hv_mv_exchanges.to_csv(os.path.join(
target_dir, 'hv_mv_exchanges.csv'))
def _save_pypsa_network(target_dir):
if self.network.pypsa:
# create directory
os.makedirs(target_dir, exist_ok=True)
self.network.pypsa.export_to_csv_folder(target_dir)
def _save_grid_expansion_results(target_dir):
if self.grid_expansion_costs is not None:
# create directory
os.makedirs(target_dir, exist_ok=True)
# grid expansion costs
self.grid_expansion_costs.to_csv(os.path.join(
target_dir, 'grid_expansion_costs.csv'))
# unresolved issues
pd.DataFrame(self.unresolved_issues).to_csv(os.path.join(
target_dir, 'unresolved_issues.csv'))
# equipment changes
self.equipment_changes.to_csv(os.path.join(
target_dir, 'equipment_changes.csv'))
def _save_curtailment_results(target_dir):
if self.curtailment is not None:
# create directory
os.makedirs(target_dir, exist_ok=True)
for key, curtailment_df in self.curtailment.items():
if type(key) == tuple:
type_prefix = '-'.join([key[0], str(key[1])])
elif type(key) == str:
type_prefix = key
else:
raise KeyError("Unknown key type {} for key {}".format(
type(key), key))
filename = os.path.join(
target_dir, '{}.csv'.format(type_prefix))
curtailment_df.to_csv(filename, index_label=type_prefix)
def _save_storage_integration_results(target_dir):
storages = self.storages
if not storages.empty:
# create directory
os.makedirs(target_dir, exist_ok=True)
# general storage information
storages.to_csv(os.path.join(target_dir, 'storages.csv'))
# storages time series
ts_p, ts_q = self.storages_timeseries()
ts_p.to_csv(os.path.join(
target_dir, 'storages_active_power.csv'))
ts_q.to_csv(os.path.join(
target_dir, 'storages_reactive_power.csv'))
if not self.storages_costs_reduction is None:
self.storages_costs_reduction.to_csv(
os.path.join(target_dir,
'storages_costs_reduction.csv'))
# dictionary with function to call to save each parameter
func_dict = {
'powerflow_results': _save_power_flow_results,
'pypsa_network': _save_pypsa_network,
'grid_expansion_results': _save_grid_expansion_results,
'curtailment_results': _save_curtailment_results,
'storage_integration_results': _save_storage_integration_results
}
# if string is given convert to list
if isinstance(parameters, str):
if parameters == 'all':
parameters = ['powerflow_results', 'pypsa_network',
'grid_expansion_results', 'curtailment_results',
'storage_integration_results']
else:
parameters = [parameters]
# save each parameter
for parameter in parameters:
try:
func_dict[parameter](os.path.join(directory, parameter))
except KeyError:
message = "Invalid input {} for `parameters` when saving " \
"results. Must be any or a list of the following: " \
"'pypsa_network', 'powerflow_results', " \
"'grid_expansion_results', 'curtailment_results', " \
"'storage_integration_results'.".format(parameter)
logger.error(message)
raise KeyError(message)
except:
raise
# save measures
pd.DataFrame(data={'measure': self.measures}).to_csv(
os.path.join(directory, 'measures.csv'))
# save configs
with open(os.path.join(directory, 'configs.csv'), 'w') as f:
writer = csv.writer(f)
rows = [
['{}'.format(key)] + [value for item in values.items()
for value in item]
for key, values in self.network.config._data.items()]
writer.writerows(rows) | def function[save, parameter[self, directory, parameters]]:
constant[
Saves results to disk.
Depending on which results are selected and if they exist, the
following directories and files are created:
* `powerflow_results` directory
* `voltages_pu.csv`
See :py:attr:`~pfa_v_mag_pu` for more information.
* `currents.csv`
See :func:`~i_res` for more information.
* `active_powers.csv`
See :py:attr:`~pfa_p` for more information.
* `reactive_powers.csv`
See :py:attr:`~pfa_q` for more information.
* `apparent_powers.csv`
See :func:`~s_res` for more information.
* `grid_losses.csv`
See :py:attr:`~grid_losses` for more information.
* `hv_mv_exchanges.csv`
See :py:attr:`~hv_mv_exchanges` for more information.
* `pypsa_network` directory
See :py:func:`pypsa.Network.export_to_csv_folder`
* `grid_expansion_results` directory
* `grid_expansion_costs.csv`
See :py:attr:`~grid_expansion_costs` for more information.
* `equipment_changes.csv`
See :py:attr:`~equipment_changes` for more information.
* `unresolved_issues.csv`
See :py:attr:`~unresolved_issues` for more information.
* `curtailment_results` directory
Files depend on curtailment specifications. There will be one file
for each curtailment specification, that is for every key in
:py:attr:`~curtailment` dictionary.
* `storage_integration_results` directory
* `storages.csv`
See :func:`~storages` for more information.
Parameters
----------
directory : :obj:`str`
Directory to save the results in.
parameters : :obj:`str` or :obj:`list` of :obj:`str`
Specifies which results will be saved. By default all results are
saved. To only save certain results set `parameters` to one of the
following options or choose several options by providing a list:
* 'pypsa_network'
* 'powerflow_results'
* 'grid_expansion_results'
* 'curtailment_results'
* 'storage_integration_results'
]
def function[_save_power_flow_results, parameter[target_dir]]:
if compare[name[self].pfa_v_mag_pu is_not constant[None]] begin[:]
call[name[os].makedirs, parameter[name[target_dir]]]
call[name[self].pfa_v_mag_pu.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[voltages_pu.csv]]]]]
call[name[self].i_res.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[currents.csv]]]]]
call[name[self].pfa_p.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[active_powers.csv]]]]]
call[name[self].pfa_q.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[reactive_powers.csv]]]]]
call[call[name[self].s_res, parameter[]].to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[apparent_powers.csv]]]]]
call[name[self].grid_losses.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[grid_losses.csv]]]]]
call[name[self].hv_mv_exchanges.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[hv_mv_exchanges.csv]]]]]
def function[_save_pypsa_network, parameter[target_dir]]:
if name[self].network.pypsa begin[:]
call[name[os].makedirs, parameter[name[target_dir]]]
call[name[self].network.pypsa.export_to_csv_folder, parameter[name[target_dir]]]
def function[_save_grid_expansion_results, parameter[target_dir]]:
if compare[name[self].grid_expansion_costs is_not constant[None]] begin[:]
call[name[os].makedirs, parameter[name[target_dir]]]
call[name[self].grid_expansion_costs.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[grid_expansion_costs.csv]]]]]
call[call[name[pd].DataFrame, parameter[name[self].unresolved_issues]].to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[unresolved_issues.csv]]]]]
call[name[self].equipment_changes.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[equipment_changes.csv]]]]]
def function[_save_curtailment_results, parameter[target_dir]]:
if compare[name[self].curtailment is_not constant[None]] begin[:]
call[name[os].makedirs, parameter[name[target_dir]]]
for taget[tuple[[<ast.Name object at 0x7da1b0334fd0>, <ast.Name object at 0x7da1b0335090>]]] in starred[call[name[self].curtailment.items, parameter[]]] begin[:]
if compare[call[name[type], parameter[name[key]]] equal[==] name[tuple]] begin[:]
variable[type_prefix] assign[=] call[constant[-].join, parameter[list[[<ast.Subscript object at 0x7da1b0335ea0>, <ast.Call object at 0x7da1b0335b10>]]]]
variable[filename] assign[=] call[name[os].path.join, parameter[name[target_dir], call[constant[{}.csv].format, parameter[name[type_prefix]]]]]
call[name[curtailment_df].to_csv, parameter[name[filename]]]
def function[_save_storage_integration_results, parameter[target_dir]]:
variable[storages] assign[=] name[self].storages
if <ast.UnaryOp object at 0x7da1b0335f00> begin[:]
call[name[os].makedirs, parameter[name[target_dir]]]
call[name[storages].to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[storages.csv]]]]]
<ast.Tuple object at 0x7da1b0334c40> assign[=] call[name[self].storages_timeseries, parameter[]]
call[name[ts_p].to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[storages_active_power.csv]]]]]
call[name[ts_q].to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[storages_reactive_power.csv]]]]]
if <ast.UnaryOp object at 0x7da1b03362c0> begin[:]
call[name[self].storages_costs_reduction.to_csv, parameter[call[name[os].path.join, parameter[name[target_dir], constant[storages_costs_reduction.csv]]]]]
variable[func_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0334b50>, <ast.Constant object at 0x7da1b0334070>, <ast.Constant object at 0x7da1b0334610>, <ast.Constant object at 0x7da1b0336620>, <ast.Constant object at 0x7da1b03375b0>], [<ast.Name object at 0x7da1b0334400>, <ast.Name object at 0x7da1b0335a80>, <ast.Name object at 0x7da1b0336d70>, <ast.Name object at 0x7da1b0335210>, <ast.Name object at 0x7da1b0337c40>]]
if call[name[isinstance], parameter[name[parameters], name[str]]] begin[:]
if compare[name[parameters] equal[==] constant[all]] begin[:]
variable[parameters] assign[=] list[[<ast.Constant object at 0x7da1b0336e90>, <ast.Constant object at 0x7da1b03349a0>, <ast.Constant object at 0x7da1b03372b0>, <ast.Constant object at 0x7da1b03359f0>, <ast.Constant object at 0x7da1b03342e0>]]
for taget[name[parameter]] in starred[name[parameters]] begin[:]
<ast.Try object at 0x7da1b0283e50>
call[call[name[pd].DataFrame, parameter[]].to_csv, parameter[call[name[os].path.join, parameter[name[directory], constant[measures.csv]]]]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[directory], constant[configs.csv]]], constant[w]]] begin[:]
variable[writer] assign[=] call[name[csv].writer, parameter[name[f]]]
variable[rows] assign[=] <ast.ListComp object at 0x7da1b02d8a90>
call[name[writer].writerows, parameter[name[rows]]] | keyword[def] identifier[save] ( identifier[self] , identifier[directory] , identifier[parameters] = literal[string] ):
literal[string]
keyword[def] identifier[_save_power_flow_results] ( identifier[target_dir] ):
keyword[if] identifier[self] . identifier[pfa_v_mag_pu] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[makedirs] ( identifier[target_dir] , identifier[exist_ok] = keyword[True] )
identifier[self] . identifier[pfa_v_mag_pu] . identifier[to_csv] (
identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ))
identifier[self] . identifier[i_res] . identifier[to_csv] (
identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ))
identifier[self] . identifier[pfa_p] . identifier[to_csv] (
identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ))
identifier[self] . identifier[pfa_q] . identifier[to_csv] (
identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ))
identifier[self] . identifier[s_res] (). identifier[to_csv] (
identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ))
identifier[self] . identifier[grid_losses] . identifier[to_csv] (
identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ))
identifier[self] . identifier[hv_mv_exchanges] . identifier[to_csv] ( identifier[os] . identifier[path] . identifier[join] (
identifier[target_dir] , literal[string] ))
keyword[def] identifier[_save_pypsa_network] ( identifier[target_dir] ):
keyword[if] identifier[self] . identifier[network] . identifier[pypsa] :
identifier[os] . identifier[makedirs] ( identifier[target_dir] , identifier[exist_ok] = keyword[True] )
identifier[self] . identifier[network] . identifier[pypsa] . identifier[export_to_csv_folder] ( identifier[target_dir] )
keyword[def] identifier[_save_grid_expansion_results] ( identifier[target_dir] ):
keyword[if] identifier[self] . identifier[grid_expansion_costs] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[makedirs] ( identifier[target_dir] , identifier[exist_ok] = keyword[True] )
identifier[self] . identifier[grid_expansion_costs] . identifier[to_csv] ( identifier[os] . identifier[path] . identifier[join] (
identifier[target_dir] , literal[string] ))
identifier[pd] . identifier[DataFrame] ( identifier[self] . identifier[unresolved_issues] ). identifier[to_csv] ( identifier[os] . identifier[path] . identifier[join] (
identifier[target_dir] , literal[string] ))
identifier[self] . identifier[equipment_changes] . identifier[to_csv] ( identifier[os] . identifier[path] . identifier[join] (
identifier[target_dir] , literal[string] ))
keyword[def] identifier[_save_curtailment_results] ( identifier[target_dir] ):
keyword[if] identifier[self] . identifier[curtailment] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[makedirs] ( identifier[target_dir] , identifier[exist_ok] = keyword[True] )
keyword[for] identifier[key] , identifier[curtailment_df] keyword[in] identifier[self] . identifier[curtailment] . identifier[items] ():
keyword[if] identifier[type] ( identifier[key] )== identifier[tuple] :
identifier[type_prefix] = literal[string] . identifier[join] ([ identifier[key] [ literal[int] ], identifier[str] ( identifier[key] [ literal[int] ])])
keyword[elif] identifier[type] ( identifier[key] )== identifier[str] :
identifier[type_prefix] = identifier[key]
keyword[else] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] (
identifier[type] ( identifier[key] ), identifier[key] ))
identifier[filename] = identifier[os] . identifier[path] . identifier[join] (
identifier[target_dir] , literal[string] . identifier[format] ( identifier[type_prefix] ))
identifier[curtailment_df] . identifier[to_csv] ( identifier[filename] , identifier[index_label] = identifier[type_prefix] )
keyword[def] identifier[_save_storage_integration_results] ( identifier[target_dir] ):
identifier[storages] = identifier[self] . identifier[storages]
keyword[if] keyword[not] identifier[storages] . identifier[empty] :
identifier[os] . identifier[makedirs] ( identifier[target_dir] , identifier[exist_ok] = keyword[True] )
identifier[storages] . identifier[to_csv] ( identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ))
identifier[ts_p] , identifier[ts_q] = identifier[self] . identifier[storages_timeseries] ()
identifier[ts_p] . identifier[to_csv] ( identifier[os] . identifier[path] . identifier[join] (
identifier[target_dir] , literal[string] ))
identifier[ts_q] . identifier[to_csv] ( identifier[os] . identifier[path] . identifier[join] (
identifier[target_dir] , literal[string] ))
keyword[if] keyword[not] identifier[self] . identifier[storages_costs_reduction] keyword[is] keyword[None] :
identifier[self] . identifier[storages_costs_reduction] . identifier[to_csv] (
identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] ,
literal[string] ))
identifier[func_dict] ={
literal[string] : identifier[_save_power_flow_results] ,
literal[string] : identifier[_save_pypsa_network] ,
literal[string] : identifier[_save_grid_expansion_results] ,
literal[string] : identifier[_save_curtailment_results] ,
literal[string] : identifier[_save_storage_integration_results]
}
keyword[if] identifier[isinstance] ( identifier[parameters] , identifier[str] ):
keyword[if] identifier[parameters] == literal[string] :
identifier[parameters] =[ literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] ]
keyword[else] :
identifier[parameters] =[ identifier[parameters] ]
keyword[for] identifier[parameter] keyword[in] identifier[parameters] :
keyword[try] :
identifier[func_dict] [ identifier[parameter] ]( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[parameter] ))
keyword[except] identifier[KeyError] :
identifier[message] = literal[string] literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[parameter] )
identifier[logger] . identifier[error] ( identifier[message] )
keyword[raise] identifier[KeyError] ( identifier[message] )
keyword[except] :
keyword[raise]
identifier[pd] . identifier[DataFrame] ( identifier[data] ={ literal[string] : identifier[self] . identifier[measures] }). identifier[to_csv] (
identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] ))
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] ), literal[string] ) keyword[as] identifier[f] :
identifier[writer] = identifier[csv] . identifier[writer] ( identifier[f] )
identifier[rows] =[
[ literal[string] . identifier[format] ( identifier[key] )]+[ identifier[value] keyword[for] identifier[item] keyword[in] identifier[values] . identifier[items] ()
keyword[for] identifier[value] keyword[in] identifier[item] ]
keyword[for] identifier[key] , identifier[values] keyword[in] identifier[self] . identifier[network] . identifier[config] . identifier[_data] . identifier[items] ()]
identifier[writer] . identifier[writerows] ( identifier[rows] ) | def save(self, directory, parameters='all'):
"""
Saves results to disk.
Depending on which results are selected and if they exist, the
following directories and files are created:
* `powerflow_results` directory
* `voltages_pu.csv`
See :py:attr:`~pfa_v_mag_pu` for more information.
* `currents.csv`
See :func:`~i_res` for more information.
* `active_powers.csv`
See :py:attr:`~pfa_p` for more information.
* `reactive_powers.csv`
See :py:attr:`~pfa_q` for more information.
* `apparent_powers.csv`
See :func:`~s_res` for more information.
* `grid_losses.csv`
See :py:attr:`~grid_losses` for more information.
* `hv_mv_exchanges.csv`
See :py:attr:`~hv_mv_exchanges` for more information.
* `pypsa_network` directory
See :py:func:`pypsa.Network.export_to_csv_folder`
* `grid_expansion_results` directory
* `grid_expansion_costs.csv`
See :py:attr:`~grid_expansion_costs` for more information.
* `equipment_changes.csv`
See :py:attr:`~equipment_changes` for more information.
* `unresolved_issues.csv`
See :py:attr:`~unresolved_issues` for more information.
* `curtailment_results` directory
Files depend on curtailment specifications. There will be one file
for each curtailment specification, that is for every key in
:py:attr:`~curtailment` dictionary.
* `storage_integration_results` directory
* `storages.csv`
See :func:`~storages` for more information.
Parameters
----------
directory : :obj:`str`
Directory to save the results in.
parameters : :obj:`str` or :obj:`list` of :obj:`str`
Specifies which results will be saved. By default all results are
saved. To only save certain results set `parameters` to one of the
following options or choose several options by providing a list:
* 'pypsa_network'
* 'powerflow_results'
* 'grid_expansion_results'
* 'curtailment_results'
* 'storage_integration_results'
"""
def _save_power_flow_results(target_dir):
if self.pfa_v_mag_pu is not None:
# create directory
os.makedirs(target_dir, exist_ok=True)
# voltage
self.pfa_v_mag_pu.to_csv(os.path.join(target_dir, 'voltages_pu.csv'))
# current
self.i_res.to_csv(os.path.join(target_dir, 'currents.csv'))
# active power
self.pfa_p.to_csv(os.path.join(target_dir, 'active_powers.csv'))
# reactive power
self.pfa_q.to_csv(os.path.join(target_dir, 'reactive_powers.csv'))
# apparent power
self.s_res().to_csv(os.path.join(target_dir, 'apparent_powers.csv'))
# grid losses
self.grid_losses.to_csv(os.path.join(target_dir, 'grid_losses.csv'))
# grid exchanges
self.hv_mv_exchanges.to_csv(os.path.join(target_dir, 'hv_mv_exchanges.csv')) # depends on [control=['if'], data=[]]
def _save_pypsa_network(target_dir):
if self.network.pypsa:
# create directory
os.makedirs(target_dir, exist_ok=True)
self.network.pypsa.export_to_csv_folder(target_dir) # depends on [control=['if'], data=[]]
def _save_grid_expansion_results(target_dir):
if self.grid_expansion_costs is not None:
# create directory
os.makedirs(target_dir, exist_ok=True)
# grid expansion costs
self.grid_expansion_costs.to_csv(os.path.join(target_dir, 'grid_expansion_costs.csv'))
# unresolved issues
pd.DataFrame(self.unresolved_issues).to_csv(os.path.join(target_dir, 'unresolved_issues.csv'))
# equipment changes
self.equipment_changes.to_csv(os.path.join(target_dir, 'equipment_changes.csv')) # depends on [control=['if'], data=[]]
def _save_curtailment_results(target_dir):
if self.curtailment is not None:
# create directory
os.makedirs(target_dir, exist_ok=True)
for (key, curtailment_df) in self.curtailment.items():
if type(key) == tuple:
type_prefix = '-'.join([key[0], str(key[1])]) # depends on [control=['if'], data=[]]
elif type(key) == str:
type_prefix = key # depends on [control=['if'], data=[]]
else:
raise KeyError('Unknown key type {} for key {}'.format(type(key), key))
filename = os.path.join(target_dir, '{}.csv'.format(type_prefix))
curtailment_df.to_csv(filename, index_label=type_prefix) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
def _save_storage_integration_results(target_dir):
storages = self.storages
if not storages.empty:
# create directory
os.makedirs(target_dir, exist_ok=True)
# general storage information
storages.to_csv(os.path.join(target_dir, 'storages.csv'))
# storages time series
(ts_p, ts_q) = self.storages_timeseries()
ts_p.to_csv(os.path.join(target_dir, 'storages_active_power.csv'))
ts_q.to_csv(os.path.join(target_dir, 'storages_reactive_power.csv'))
if not self.storages_costs_reduction is None:
self.storages_costs_reduction.to_csv(os.path.join(target_dir, 'storages_costs_reduction.csv')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# dictionary with function to call to save each parameter
func_dict = {'powerflow_results': _save_power_flow_results, 'pypsa_network': _save_pypsa_network, 'grid_expansion_results': _save_grid_expansion_results, 'curtailment_results': _save_curtailment_results, 'storage_integration_results': _save_storage_integration_results}
# if string is given convert to list
if isinstance(parameters, str):
if parameters == 'all':
parameters = ['powerflow_results', 'pypsa_network', 'grid_expansion_results', 'curtailment_results', 'storage_integration_results'] # depends on [control=['if'], data=['parameters']]
else:
parameters = [parameters] # depends on [control=['if'], data=[]]
# save each parameter
for parameter in parameters:
try:
func_dict[parameter](os.path.join(directory, parameter)) # depends on [control=['try'], data=[]]
except KeyError:
message = "Invalid input {} for `parameters` when saving results. Must be any or a list of the following: 'pypsa_network', 'powerflow_results', 'grid_expansion_results', 'curtailment_results', 'storage_integration_results'.".format(parameter)
logger.error(message)
raise KeyError(message) # depends on [control=['except'], data=[]]
except:
raise # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['parameter']]
# save measures
pd.DataFrame(data={'measure': self.measures}).to_csv(os.path.join(directory, 'measures.csv'))
# save configs
with open(os.path.join(directory, 'configs.csv'), 'w') as f:
writer = csv.writer(f)
rows = [['{}'.format(key)] + [value for item in values.items() for value in item] for (key, values) in self.network.config._data.items()]
writer.writerows(rows) # depends on [control=['with'], data=['f']] |
def sync_one(self, aws_syncr, amazon, gateway):
"""Make sure this gateway exists and has only attributes we want it to have"""
gateway_info = amazon.apigateway.gateway_info(gateway.name, gateway.location)
if not gateway_info:
amazon.apigateway.create_gateway(gateway.name, gateway.location, gateway.stages, gateway.resources, gateway.api_keys, gateway.domain_names)
else:
amazon.apigateway.modify_gateway(gateway_info, gateway.name, gateway.location, gateway.stages, gateway.resources, gateway.api_keys, gateway.domain_names) | def function[sync_one, parameter[self, aws_syncr, amazon, gateway]]:
constant[Make sure this gateway exists and has only attributes we want it to have]
variable[gateway_info] assign[=] call[name[amazon].apigateway.gateway_info, parameter[name[gateway].name, name[gateway].location]]
if <ast.UnaryOp object at 0x7da2045656f0> begin[:]
call[name[amazon].apigateway.create_gateway, parameter[name[gateway].name, name[gateway].location, name[gateway].stages, name[gateway].resources, name[gateway].api_keys, name[gateway].domain_names]] | keyword[def] identifier[sync_one] ( identifier[self] , identifier[aws_syncr] , identifier[amazon] , identifier[gateway] ):
literal[string]
identifier[gateway_info] = identifier[amazon] . identifier[apigateway] . identifier[gateway_info] ( identifier[gateway] . identifier[name] , identifier[gateway] . identifier[location] )
keyword[if] keyword[not] identifier[gateway_info] :
identifier[amazon] . identifier[apigateway] . identifier[create_gateway] ( identifier[gateway] . identifier[name] , identifier[gateway] . identifier[location] , identifier[gateway] . identifier[stages] , identifier[gateway] . identifier[resources] , identifier[gateway] . identifier[api_keys] , identifier[gateway] . identifier[domain_names] )
keyword[else] :
identifier[amazon] . identifier[apigateway] . identifier[modify_gateway] ( identifier[gateway_info] , identifier[gateway] . identifier[name] , identifier[gateway] . identifier[location] , identifier[gateway] . identifier[stages] , identifier[gateway] . identifier[resources] , identifier[gateway] . identifier[api_keys] , identifier[gateway] . identifier[domain_names] ) | def sync_one(self, aws_syncr, amazon, gateway):
"""Make sure this gateway exists and has only attributes we want it to have"""
gateway_info = amazon.apigateway.gateway_info(gateway.name, gateway.location)
if not gateway_info:
amazon.apigateway.create_gateway(gateway.name, gateway.location, gateway.stages, gateway.resources, gateway.api_keys, gateway.domain_names) # depends on [control=['if'], data=[]]
else:
amazon.apigateway.modify_gateway(gateway_info, gateway.name, gateway.location, gateway.stages, gateway.resources, gateway.api_keys, gateway.domain_names) |
def _compare_list(new_list, old_list, change_list=None, root=None):
'''
a method for recursively listing changes made to a list
:param new_list: list with new value
:param old_list: list with old values
:param change_list: list of differences between old and new
:param root: string with record of path to the root of the main object
:return: list of differences between old and new
'''
from copy import deepcopy
if len(old_list) > len(new_list):
same_len = len(new_list)
for i in reversed(range(len(new_list), len(old_list))):
new_path = deepcopy(root)
new_path.append(i)
change_list.append({'action': 'REMOVE', 'value': None, 'path': new_path})
elif len(new_list) > len(old_list):
same_len = len(old_list)
append_list = []
path = deepcopy(root)
for i in range(len(old_list), len(new_list)):
append_list.append(new_list[i])
change_list.append({'action': 'APPEND', 'value': append_list, 'path': path})
else:
same_len = len(new_list)
for i in range(0, same_len):
new_path = deepcopy(root)
new_path.append(i)
if new_list[i].__class__ != old_list[i].__class__:
change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path})
elif isinstance(new_list[i], dict):
_compare_dict(new_list[i], old_list[i], change_list, new_path)
elif isinstance(new_list[i], list):
_compare_list(new_list[i], old_list[i], change_list, new_path)
elif isinstance(new_list[i], set):
_compare_set(new_list[i], old_list[i], change_list, new_path)
elif new_list[i] != old_list[i]:
change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path})
return change_list | def function[_compare_list, parameter[new_list, old_list, change_list, root]]:
constant[
a method for recursively listing changes made to a list
:param new_list: list with new value
:param old_list: list with old values
:param change_list: list of differences between old and new
:param root: string with record of path to the root of the main object
:return: list of differences between old and new
]
from relative_module[copy] import module[deepcopy]
if compare[call[name[len], parameter[name[old_list]]] greater[>] call[name[len], parameter[name[new_list]]]] begin[:]
variable[same_len] assign[=] call[name[len], parameter[name[new_list]]]
for taget[name[i]] in starred[call[name[reversed], parameter[call[name[range], parameter[call[name[len], parameter[name[new_list]]], call[name[len], parameter[name[old_list]]]]]]]] begin[:]
variable[new_path] assign[=] call[name[deepcopy], parameter[name[root]]]
call[name[new_path].append, parameter[name[i]]]
call[name[change_list].append, parameter[dictionary[[<ast.Constant object at 0x7da20c992b60>, <ast.Constant object at 0x7da20c992f50>, <ast.Constant object at 0x7da20c9932e0>], [<ast.Constant object at 0x7da20c991390>, <ast.Constant object at 0x7da20c991120>, <ast.Name object at 0x7da20c992da0>]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[same_len]]]] begin[:]
variable[new_path] assign[=] call[name[deepcopy], parameter[name[root]]]
call[name[new_path].append, parameter[name[i]]]
if compare[call[name[new_list]][name[i]].__class__ not_equal[!=] call[name[old_list]][name[i]].__class__] begin[:]
call[name[change_list].append, parameter[dictionary[[<ast.Constant object at 0x7da1b14d1960>, <ast.Constant object at 0x7da1b14d3250>, <ast.Constant object at 0x7da1b14d2380>], [<ast.Constant object at 0x7da1b14d0eb0>, <ast.Subscript object at 0x7da1b14d24a0>, <ast.Name object at 0x7da1b14d0b20>]]]]
return[name[change_list]] | keyword[def] identifier[_compare_list] ( identifier[new_list] , identifier[old_list] , identifier[change_list] = keyword[None] , identifier[root] = keyword[None] ):
literal[string]
keyword[from] identifier[copy] keyword[import] identifier[deepcopy]
keyword[if] identifier[len] ( identifier[old_list] )> identifier[len] ( identifier[new_list] ):
identifier[same_len] = identifier[len] ( identifier[new_list] )
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[range] ( identifier[len] ( identifier[new_list] ), identifier[len] ( identifier[old_list] ))):
identifier[new_path] = identifier[deepcopy] ( identifier[root] )
identifier[new_path] . identifier[append] ( identifier[i] )
identifier[change_list] . identifier[append] ({ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : identifier[new_path] })
keyword[elif] identifier[len] ( identifier[new_list] )> identifier[len] ( identifier[old_list] ):
identifier[same_len] = identifier[len] ( identifier[old_list] )
identifier[append_list] =[]
identifier[path] = identifier[deepcopy] ( identifier[root] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[old_list] ), identifier[len] ( identifier[new_list] )):
identifier[append_list] . identifier[append] ( identifier[new_list] [ identifier[i] ])
identifier[change_list] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[append_list] , literal[string] : identifier[path] })
keyword[else] :
identifier[same_len] = identifier[len] ( identifier[new_list] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[same_len] ):
identifier[new_path] = identifier[deepcopy] ( identifier[root] )
identifier[new_path] . identifier[append] ( identifier[i] )
keyword[if] identifier[new_list] [ identifier[i] ]. identifier[__class__] != identifier[old_list] [ identifier[i] ]. identifier[__class__] :
identifier[change_list] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[new_list] [ identifier[i] ], literal[string] : identifier[new_path] })
keyword[elif] identifier[isinstance] ( identifier[new_list] [ identifier[i] ], identifier[dict] ):
identifier[_compare_dict] ( identifier[new_list] [ identifier[i] ], identifier[old_list] [ identifier[i] ], identifier[change_list] , identifier[new_path] )
keyword[elif] identifier[isinstance] ( identifier[new_list] [ identifier[i] ], identifier[list] ):
identifier[_compare_list] ( identifier[new_list] [ identifier[i] ], identifier[old_list] [ identifier[i] ], identifier[change_list] , identifier[new_path] )
keyword[elif] identifier[isinstance] ( identifier[new_list] [ identifier[i] ], identifier[set] ):
identifier[_compare_set] ( identifier[new_list] [ identifier[i] ], identifier[old_list] [ identifier[i] ], identifier[change_list] , identifier[new_path] )
keyword[elif] identifier[new_list] [ identifier[i] ]!= identifier[old_list] [ identifier[i] ]:
identifier[change_list] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[new_list] [ identifier[i] ], literal[string] : identifier[new_path] })
keyword[return] identifier[change_list] | def _compare_list(new_list, old_list, change_list=None, root=None):
"""
a method for recursively listing changes made to a list
:param new_list: list with new value
:param old_list: list with old values
:param change_list: list of differences between old and new
:param root: string with record of path to the root of the main object
:return: list of differences between old and new
"""
from copy import deepcopy
if len(old_list) > len(new_list):
same_len = len(new_list)
for i in reversed(range(len(new_list), len(old_list))):
new_path = deepcopy(root)
new_path.append(i)
change_list.append({'action': 'REMOVE', 'value': None, 'path': new_path}) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
elif len(new_list) > len(old_list):
same_len = len(old_list)
append_list = []
path = deepcopy(root)
for i in range(len(old_list), len(new_list)):
append_list.append(new_list[i]) # depends on [control=['for'], data=['i']]
change_list.append({'action': 'APPEND', 'value': append_list, 'path': path}) # depends on [control=['if'], data=[]]
else:
same_len = len(new_list)
for i in range(0, same_len):
new_path = deepcopy(root)
new_path.append(i)
if new_list[i].__class__ != old_list[i].__class__:
change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path}) # depends on [control=['if'], data=[]]
elif isinstance(new_list[i], dict):
_compare_dict(new_list[i], old_list[i], change_list, new_path) # depends on [control=['if'], data=[]]
elif isinstance(new_list[i], list):
_compare_list(new_list[i], old_list[i], change_list, new_path) # depends on [control=['if'], data=[]]
elif isinstance(new_list[i], set):
_compare_set(new_list[i], old_list[i], change_list, new_path) # depends on [control=['if'], data=[]]
elif new_list[i] != old_list[i]:
change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return change_list |
def upload_complete(self, path, url, quiet):
""" function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
"""
file_size = os.path.getsize(path)
try:
with tqdm(
total=file_size,
unit='B',
unit_scale=True,
unit_divisor=1024,
disable=quiet) as progress_bar:
with io.open(path, 'rb', buffering=0) as fp:
reader = TqdmBufferedReader(fp, progress_bar)
session = requests.Session()
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
response = session.put(url, data=reader)
except Exception as error:
print(error)
return False
return response.status_code == 200 or response.status_code == 201 | def function[upload_complete, parameter[self, path, url, quiet]]:
constant[ function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
]
variable[file_size] assign[=] call[name[os].path.getsize, parameter[name[path]]]
<ast.Try object at 0x7da1b2114400>
return[<ast.BoolOp object at 0x7da1b2123580>] | keyword[def] identifier[upload_complete] ( identifier[self] , identifier[path] , identifier[url] , identifier[quiet] ):
literal[string]
identifier[file_size] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[path] )
keyword[try] :
keyword[with] identifier[tqdm] (
identifier[total] = identifier[file_size] ,
identifier[unit] = literal[string] ,
identifier[unit_scale] = keyword[True] ,
identifier[unit_divisor] = literal[int] ,
identifier[disable] = identifier[quiet] ) keyword[as] identifier[progress_bar] :
keyword[with] identifier[io] . identifier[open] ( identifier[path] , literal[string] , identifier[buffering] = literal[int] ) keyword[as] identifier[fp] :
identifier[reader] = identifier[TqdmBufferedReader] ( identifier[fp] , identifier[progress_bar] )
identifier[session] = identifier[requests] . identifier[Session] ()
identifier[retries] = identifier[Retry] ( identifier[total] = literal[int] , identifier[backoff_factor] = literal[int] )
identifier[adapter] = identifier[HTTPAdapter] ( identifier[max_retries] = identifier[retries] )
identifier[session] . identifier[mount] ( literal[string] , identifier[adapter] )
identifier[session] . identifier[mount] ( literal[string] , identifier[adapter] )
identifier[response] = identifier[session] . identifier[put] ( identifier[url] , identifier[data] = identifier[reader] )
keyword[except] identifier[Exception] keyword[as] identifier[error] :
identifier[print] ( identifier[error] )
keyword[return] keyword[False]
keyword[return] identifier[response] . identifier[status_code] == literal[int] keyword[or] identifier[response] . identifier[status_code] == literal[int] | def upload_complete(self, path, url, quiet):
""" function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
"""
file_size = os.path.getsize(path)
try:
with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024, disable=quiet) as progress_bar:
with io.open(path, 'rb', buffering=0) as fp:
reader = TqdmBufferedReader(fp, progress_bar)
session = requests.Session()
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
response = session.put(url, data=reader) # depends on [control=['with'], data=['fp']] # depends on [control=['with'], data=['progress_bar']] # depends on [control=['try'], data=[]]
except Exception as error:
print(error)
return False # depends on [control=['except'], data=['error']]
return response.status_code == 200 or response.status_code == 201 |
def delete_example(self, example_id,
url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json() | def function[delete_example, parameter[self, example_id, url]]:
constant[删除例句]
variable[url] assign[=] call[name[url].format, parameter[]]
return[call[call[name[self]._request, parameter[name[url]]].json, parameter[]]] | keyword[def] identifier[delete_example] ( identifier[self] , identifier[example_id] ,
identifier[url] = literal[string] ):
literal[string]
identifier[url] = identifier[url] . identifier[format] ( identifier[example_id] = identifier[example_id] )
keyword[return] identifier[self] . identifier[_request] ( identifier[url] , identifier[method] = literal[string] ). identifier[json] () | def delete_example(self, example_id, url='https://api.shanbay.com/bdc/example/{example_id}/'):
"""删除例句"""
url = url.format(example_id=example_id)
return self._request(url, method='delete').json() |
def audio(self, tag, audiodata, step=None, sample_rate=44100):
"""Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer
"""
audiodata = onp.array(audiodata)
if step is None:
step = self._step
else:
self._step = step
audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
if audiodata.ndim != 1:
raise ValueError('Audio data must be 1D.')
sample_list = (32767.0 * audiodata).astype(int).tolist()
wio = io.BytesIO()
wav_buf = wave.open(wio, 'wb')
wav_buf.setnchannels(1)
wav_buf.setsampwidth(2)
wav_buf.setframerate(sample_rate)
enc = b''.join([struct.pack('<h', v) for v in sample_list])
wav_buf.writeframes(enc)
wav_buf.close()
encoded_audio_bytes = wio.getvalue()
wio.close()
audio = Summary.Audio(
sample_rate=sample_rate,
num_channels=1,
length_frames=len(sample_list),
encoded_audio_string=encoded_audio_bytes,
content_type='audio/wav')
summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
self.add_summary(summary, step) | def function[audio, parameter[self, tag, audiodata, step, sample_rate]]:
constant[Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer
]
variable[audiodata] assign[=] call[name[onp].array, parameter[name[audiodata]]]
if compare[name[step] is constant[None]] begin[:]
variable[step] assign[=] name[self]._step
variable[audiodata] assign[=] call[name[onp].clip, parameter[call[name[onp].squeeze, parameter[name[audiodata]]], <ast.UnaryOp object at 0x7da1b1e15840>, constant[1]]]
if compare[name[audiodata].ndim not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1e15b70>
variable[sample_list] assign[=] call[call[binary_operation[constant[32767.0] * name[audiodata]].astype, parameter[name[int]]].tolist, parameter[]]
variable[wio] assign[=] call[name[io].BytesIO, parameter[]]
variable[wav_buf] assign[=] call[name[wave].open, parameter[name[wio], constant[wb]]]
call[name[wav_buf].setnchannels, parameter[constant[1]]]
call[name[wav_buf].setsampwidth, parameter[constant[2]]]
call[name[wav_buf].setframerate, parameter[name[sample_rate]]]
variable[enc] assign[=] call[constant[b''].join, parameter[<ast.ListComp object at 0x7da1b1e16c80>]]
call[name[wav_buf].writeframes, parameter[name[enc]]]
call[name[wav_buf].close, parameter[]]
variable[encoded_audio_bytes] assign[=] call[name[wio].getvalue, parameter[]]
call[name[wio].close, parameter[]]
variable[audio] assign[=] call[name[Summary].Audio, parameter[]]
variable[summary] assign[=] call[name[Summary], parameter[]]
call[name[self].add_summary, parameter[name[summary], name[step]]] | keyword[def] identifier[audio] ( identifier[self] , identifier[tag] , identifier[audiodata] , identifier[step] = keyword[None] , identifier[sample_rate] = literal[int] ):
literal[string]
identifier[audiodata] = identifier[onp] . identifier[array] ( identifier[audiodata] )
keyword[if] identifier[step] keyword[is] keyword[None] :
identifier[step] = identifier[self] . identifier[_step]
keyword[else] :
identifier[self] . identifier[_step] = identifier[step]
identifier[audiodata] = identifier[onp] . identifier[clip] ( identifier[onp] . identifier[squeeze] ( identifier[audiodata] ),- literal[int] , literal[int] )
keyword[if] identifier[audiodata] . identifier[ndim] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[sample_list] =( literal[int] * identifier[audiodata] ). identifier[astype] ( identifier[int] ). identifier[tolist] ()
identifier[wio] = identifier[io] . identifier[BytesIO] ()
identifier[wav_buf] = identifier[wave] . identifier[open] ( identifier[wio] , literal[string] )
identifier[wav_buf] . identifier[setnchannels] ( literal[int] )
identifier[wav_buf] . identifier[setsampwidth] ( literal[int] )
identifier[wav_buf] . identifier[setframerate] ( identifier[sample_rate] )
identifier[enc] = literal[string] . identifier[join] ([ identifier[struct] . identifier[pack] ( literal[string] , identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[sample_list] ])
identifier[wav_buf] . identifier[writeframes] ( identifier[enc] )
identifier[wav_buf] . identifier[close] ()
identifier[encoded_audio_bytes] = identifier[wio] . identifier[getvalue] ()
identifier[wio] . identifier[close] ()
identifier[audio] = identifier[Summary] . identifier[Audio] (
identifier[sample_rate] = identifier[sample_rate] ,
identifier[num_channels] = literal[int] ,
identifier[length_frames] = identifier[len] ( identifier[sample_list] ),
identifier[encoded_audio_string] = identifier[encoded_audio_bytes] ,
identifier[content_type] = literal[string] )
identifier[summary] = identifier[Summary] ( identifier[value] =[ identifier[Summary] . identifier[Value] ( identifier[tag] = identifier[tag] , identifier[audio] = identifier[audio] )])
identifier[self] . identifier[add_summary] ( identifier[summary] , identifier[step] ) | def audio(self, tag, audiodata, step=None, sample_rate=44100):
"""Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer
"""
audiodata = onp.array(audiodata)
if step is None:
step = self._step # depends on [control=['if'], data=['step']]
else:
self._step = step
audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
if audiodata.ndim != 1:
raise ValueError('Audio data must be 1D.') # depends on [control=['if'], data=[]]
sample_list = (32767.0 * audiodata).astype(int).tolist()
wio = io.BytesIO()
wav_buf = wave.open(wio, 'wb')
wav_buf.setnchannels(1)
wav_buf.setsampwidth(2)
wav_buf.setframerate(sample_rate)
enc = b''.join([struct.pack('<h', v) for v in sample_list])
wav_buf.writeframes(enc)
wav_buf.close()
encoded_audio_bytes = wio.getvalue()
wio.close()
audio = Summary.Audio(sample_rate=sample_rate, num_channels=1, length_frames=len(sample_list), encoded_audio_string=encoded_audio_bytes, content_type='audio/wav')
summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
self.add_summary(summary, step) |
def generate_text(self, generation_type='markov'):
""" Generates sentences from a given corpus
Args:
generation_type: 'markov' | 'hmm' | 'hmm_past'
Returns:
Properly formatted string of generated sentences
"""
assert generation_type in ['markov', 'hmm', 'hmm_past']
if generation_type == "markov":
return self._text_generator(next_token=self._generate_next_token)
elif generation_type == "hmm":
return self._text_generator(next_token=self._generate_next_token_hmm, emit=self._emitHMM)
elif generation_type == "hmm_past":
return self._text_generator(next_token=self._generate_next_token_hmm, emit=self._emitHMM_with_past) | def function[generate_text, parameter[self, generation_type]]:
constant[ Generates sentences from a given corpus
Args:
generation_type: 'markov' | 'hmm' | 'hmm_past'
Returns:
Properly formatted string of generated sentences
]
assert[compare[name[generation_type] in list[[<ast.Constant object at 0x7da2047eb3d0>, <ast.Constant object at 0x7da2047e8400>, <ast.Constant object at 0x7da2047e89d0>]]]]
if compare[name[generation_type] equal[==] constant[markov]] begin[:]
return[call[name[self]._text_generator, parameter[]]] | keyword[def] identifier[generate_text] ( identifier[self] , identifier[generation_type] = literal[string] ):
literal[string]
keyword[assert] identifier[generation_type] keyword[in] [ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[generation_type] == literal[string] :
keyword[return] identifier[self] . identifier[_text_generator] ( identifier[next_token] = identifier[self] . identifier[_generate_next_token] )
keyword[elif] identifier[generation_type] == literal[string] :
keyword[return] identifier[self] . identifier[_text_generator] ( identifier[next_token] = identifier[self] . identifier[_generate_next_token_hmm] , identifier[emit] = identifier[self] . identifier[_emitHMM] )
keyword[elif] identifier[generation_type] == literal[string] :
keyword[return] identifier[self] . identifier[_text_generator] ( identifier[next_token] = identifier[self] . identifier[_generate_next_token_hmm] , identifier[emit] = identifier[self] . identifier[_emitHMM_with_past] ) | def generate_text(self, generation_type='markov'):
""" Generates sentences from a given corpus
Args:
generation_type: 'markov' | 'hmm' | 'hmm_past'
Returns:
Properly formatted string of generated sentences
"""
assert generation_type in ['markov', 'hmm', 'hmm_past']
if generation_type == 'markov':
return self._text_generator(next_token=self._generate_next_token) # depends on [control=['if'], data=[]]
elif generation_type == 'hmm':
return self._text_generator(next_token=self._generate_next_token_hmm, emit=self._emitHMM) # depends on [control=['if'], data=[]]
elif generation_type == 'hmm_past':
return self._text_generator(next_token=self._generate_next_token_hmm, emit=self._emitHMM_with_past) # depends on [control=['if'], data=[]] |
def save_variables_to_hdf5(file_path, variables, mode='w', h5path='/'):
"""
Parameters
----------
file_path: str
variables: dict
Dictionary with objects. Object name -> object
mode: str
HDF5 file access mode
See h5py documentation for details.
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
Notes
-----
It is recommended to use numpy arrays as objects.
List or tuples of strings won't work, convert them into numpy.arrays before.
"""
if not isinstance(variables, dict):
raise ValueError('Expected `variables` to be a dict, got a {}.'.format(type(variables)))
if not variables:
raise ValueError('Expected `variables` to be a non-empty dict.')
h5file = h5py.File(file_path, mode=mode)
h5group = h5file.require_group(h5path)
for vn in variables:
data = variables[vn]
# fix for string numpy arrays
if hasattr(data, 'dtype') and (data.dtype.type is np.string_ or data.dtype.type is np.unicode_):
dt = h5py.special_dtype(vlen=str)
data = data.astype(dt)
if isinstance(data, dict):
for key in data:
#h5group.create_dataset(str(key))
#import ipdb
#ipdb.set_trace()
h5group[str(key)] = data[key]
elif isinstance(data, list):
for idx, item in enumerate(data):
#h5group.create_dataset(str(idx))
h5group[str(idx)] = item
else:
h5group[vn] = data
h5file.close() | def function[save_variables_to_hdf5, parameter[file_path, variables, mode, h5path]]:
constant[
Parameters
----------
file_path: str
variables: dict
Dictionary with objects. Object name -> object
mode: str
HDF5 file access mode
See h5py documentation for details.
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
Notes
-----
It is recommended to use numpy arrays as objects.
List or tuples of strings won't work, convert them into numpy.arrays before.
]
if <ast.UnaryOp object at 0x7da1afe7bd00> begin[:]
<ast.Raise object at 0x7da1afe79630>
if <ast.UnaryOp object at 0x7da1afe7aaa0> begin[:]
<ast.Raise object at 0x7da1afe79bd0>
variable[h5file] assign[=] call[name[h5py].File, parameter[name[file_path]]]
variable[h5group] assign[=] call[name[h5file].require_group, parameter[name[h5path]]]
for taget[name[vn]] in starred[name[variables]] begin[:]
variable[data] assign[=] call[name[variables]][name[vn]]
if <ast.BoolOp object at 0x7da1afe78a30> begin[:]
variable[dt] assign[=] call[name[h5py].special_dtype, parameter[]]
variable[data] assign[=] call[name[data].astype, parameter[name[dt]]]
if call[name[isinstance], parameter[name[data], name[dict]]] begin[:]
for taget[name[key]] in starred[name[data]] begin[:]
call[name[h5group]][call[name[str], parameter[name[key]]]] assign[=] call[name[data]][name[key]]
call[name[h5file].close, parameter[]] | keyword[def] identifier[save_variables_to_hdf5] ( identifier[file_path] , identifier[variables] , identifier[mode] = literal[string] , identifier[h5path] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[variables] , identifier[dict] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[variables] )))
keyword[if] keyword[not] identifier[variables] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[h5file] = identifier[h5py] . identifier[File] ( identifier[file_path] , identifier[mode] = identifier[mode] )
identifier[h5group] = identifier[h5file] . identifier[require_group] ( identifier[h5path] )
keyword[for] identifier[vn] keyword[in] identifier[variables] :
identifier[data] = identifier[variables] [ identifier[vn] ]
keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ) keyword[and] ( identifier[data] . identifier[dtype] . identifier[type] keyword[is] identifier[np] . identifier[string_] keyword[or] identifier[data] . identifier[dtype] . identifier[type] keyword[is] identifier[np] . identifier[unicode_] ):
identifier[dt] = identifier[h5py] . identifier[special_dtype] ( identifier[vlen] = identifier[str] )
identifier[data] = identifier[data] . identifier[astype] ( identifier[dt] )
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[for] identifier[key] keyword[in] identifier[data] :
identifier[h5group] [ identifier[str] ( identifier[key] )]= identifier[data] [ identifier[key] ]
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[list] ):
keyword[for] identifier[idx] , identifier[item] keyword[in] identifier[enumerate] ( identifier[data] ):
identifier[h5group] [ identifier[str] ( identifier[idx] )]= identifier[item]
keyword[else] :
identifier[h5group] [ identifier[vn] ]= identifier[data]
identifier[h5file] . identifier[close] () | def save_variables_to_hdf5(file_path, variables, mode='w', h5path='/'):
"""
Parameters
----------
file_path: str
variables: dict
Dictionary with objects. Object name -> object
mode: str
HDF5 file access mode
See h5py documentation for details.
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
Notes
-----
It is recommended to use numpy arrays as objects.
List or tuples of strings won't work, convert them into numpy.arrays before.
"""
if not isinstance(variables, dict):
raise ValueError('Expected `variables` to be a dict, got a {}.'.format(type(variables))) # depends on [control=['if'], data=[]]
if not variables:
raise ValueError('Expected `variables` to be a non-empty dict.') # depends on [control=['if'], data=[]]
h5file = h5py.File(file_path, mode=mode)
h5group = h5file.require_group(h5path)
for vn in variables:
data = variables[vn]
# fix for string numpy arrays
if hasattr(data, 'dtype') and (data.dtype.type is np.string_ or data.dtype.type is np.unicode_):
dt = h5py.special_dtype(vlen=str)
data = data.astype(dt) # depends on [control=['if'], data=[]]
if isinstance(data, dict):
for key in data:
#h5group.create_dataset(str(key))
#import ipdb
#ipdb.set_trace()
h5group[str(key)] = data[key] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
elif isinstance(data, list):
for (idx, item) in enumerate(data):
#h5group.create_dataset(str(idx))
h5group[str(idx)] = item # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
h5group[vn] = data # depends on [control=['for'], data=['vn']]
h5file.close() |
def get_ferromagnetic_structure(self, make_primitive=True):
"""
Returns a Structure with all magnetic moments positive
or zero.
:param make_primitive (bool): Return a primitive
structure, defaults to True.
:return: Structure
"""
structure = self.structure.copy()
structure.add_site_property("magmom", [abs(m) for m in self.magmoms])
if make_primitive:
structure = structure.get_primitive_structure(use_site_props=True)
return structure | def function[get_ferromagnetic_structure, parameter[self, make_primitive]]:
constant[
Returns a Structure with all magnetic moments positive
or zero.
:param make_primitive (bool): Return a primitive
structure, defaults to True.
:return: Structure
]
variable[structure] assign[=] call[name[self].structure.copy, parameter[]]
call[name[structure].add_site_property, parameter[constant[magmom], <ast.ListComp object at 0x7da1b21a0dc0>]]
if name[make_primitive] begin[:]
variable[structure] assign[=] call[name[structure].get_primitive_structure, parameter[]]
return[name[structure]] | keyword[def] identifier[get_ferromagnetic_structure] ( identifier[self] , identifier[make_primitive] = keyword[True] ):
literal[string]
identifier[structure] = identifier[self] . identifier[structure] . identifier[copy] ()
identifier[structure] . identifier[add_site_property] ( literal[string] ,[ identifier[abs] ( identifier[m] ) keyword[for] identifier[m] keyword[in] identifier[self] . identifier[magmoms] ])
keyword[if] identifier[make_primitive] :
identifier[structure] = identifier[structure] . identifier[get_primitive_structure] ( identifier[use_site_props] = keyword[True] )
keyword[return] identifier[structure] | def get_ferromagnetic_structure(self, make_primitive=True):
"""
Returns a Structure with all magnetic moments positive
or zero.
:param make_primitive (bool): Return a primitive
structure, defaults to True.
:return: Structure
"""
structure = self.structure.copy()
structure.add_site_property('magmom', [abs(m) for m in self.magmoms])
if make_primitive:
structure = structure.get_primitive_structure(use_site_props=True) # depends on [control=['if'], data=[]]
return structure |
def set_column_stretch(self, column=0, stretch=10):
"""
Sets the column stretch. Larger numbers mean it will expand more to
fill space.
"""
self._layout.setColumnStretch(column, stretch)
return self | def function[set_column_stretch, parameter[self, column, stretch]]:
constant[
Sets the column stretch. Larger numbers mean it will expand more to
fill space.
]
call[name[self]._layout.setColumnStretch, parameter[name[column], name[stretch]]]
return[name[self]] | keyword[def] identifier[set_column_stretch] ( identifier[self] , identifier[column] = literal[int] , identifier[stretch] = literal[int] ):
literal[string]
identifier[self] . identifier[_layout] . identifier[setColumnStretch] ( identifier[column] , identifier[stretch] )
keyword[return] identifier[self] | def set_column_stretch(self, column=0, stretch=10):
"""
Sets the column stretch. Larger numbers mean it will expand more to
fill space.
"""
self._layout.setColumnStretch(column, stretch)
return self |
def acknowledge_host_problem(self, host, sticky, notify, author, comment):
"""Acknowledge a host problem
Format of the line that triggers function call::
ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent:obsolete>;<author>;
<comment>
:param host: host to acknowledge the problem
:type host: alignak.objects.host.Host
:param sticky: if sticky == 2, the acknowledge will remain until the host returns to an
UP state else the acknowledge will be removed as soon as the host state changes
:type sticky: integer
:param notify: if to 1, send a notification
:type notify: integer
:param author: name of the author or the acknowledge
:type author: str
:param comment: comment (description) of the acknowledge
:type comment: str
:return: None
TODO: add a better ACK management
"""
notification_period = None
if getattr(host, 'notification_period', None) is not None:
notification_period = self.daemon.timeperiods[host.notification_period]
host.acknowledge_problem(notification_period, self.hosts, self.services, sticky,
notify, author, comment) | def function[acknowledge_host_problem, parameter[self, host, sticky, notify, author, comment]]:
constant[Acknowledge a host problem
Format of the line that triggers function call::
ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent:obsolete>;<author>;
<comment>
:param host: host to acknowledge the problem
:type host: alignak.objects.host.Host
:param sticky: if sticky == 2, the acknowledge will remain until the host returns to an
UP state else the acknowledge will be removed as soon as the host state changes
:type sticky: integer
:param notify: if to 1, send a notification
:type notify: integer
:param author: name of the author or the acknowledge
:type author: str
:param comment: comment (description) of the acknowledge
:type comment: str
:return: None
TODO: add a better ACK management
]
variable[notification_period] assign[=] constant[None]
if compare[call[name[getattr], parameter[name[host], constant[notification_period], constant[None]]] is_not constant[None]] begin[:]
variable[notification_period] assign[=] call[name[self].daemon.timeperiods][name[host].notification_period]
call[name[host].acknowledge_problem, parameter[name[notification_period], name[self].hosts, name[self].services, name[sticky], name[notify], name[author], name[comment]]] | keyword[def] identifier[acknowledge_host_problem] ( identifier[self] , identifier[host] , identifier[sticky] , identifier[notify] , identifier[author] , identifier[comment] ):
literal[string]
identifier[notification_period] = keyword[None]
keyword[if] identifier[getattr] ( identifier[host] , literal[string] , keyword[None] ) keyword[is] keyword[not] keyword[None] :
identifier[notification_period] = identifier[self] . identifier[daemon] . identifier[timeperiods] [ identifier[host] . identifier[notification_period] ]
identifier[host] . identifier[acknowledge_problem] ( identifier[notification_period] , identifier[self] . identifier[hosts] , identifier[self] . identifier[services] , identifier[sticky] ,
identifier[notify] , identifier[author] , identifier[comment] ) | def acknowledge_host_problem(self, host, sticky, notify, author, comment):
"""Acknowledge a host problem
Format of the line that triggers function call::
ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent:obsolete>;<author>;
<comment>
:param host: host to acknowledge the problem
:type host: alignak.objects.host.Host
:param sticky: if sticky == 2, the acknowledge will remain until the host returns to an
UP state else the acknowledge will be removed as soon as the host state changes
:type sticky: integer
:param notify: if to 1, send a notification
:type notify: integer
:param author: name of the author or the acknowledge
:type author: str
:param comment: comment (description) of the acknowledge
:type comment: str
:return: None
TODO: add a better ACK management
"""
notification_period = None
if getattr(host, 'notification_period', None) is not None:
notification_period = self.daemon.timeperiods[host.notification_period] # depends on [control=['if'], data=[]]
host.acknowledge_problem(notification_period, self.hosts, self.services, sticky, notify, author, comment) |
def num_to_ith(num):
"""1 becomes 1st, 2 becomes 2nd, etc."""
value = str(num)
before_last_digit = value[-2]
last_digit = value[-1]
if len(value) > 1 and before_last_digit == '1': return value +'th'
if last_digit == '1': return value + 'st'
if last_digit == '2': return value + 'nd'
if last_digit == '3': return value + 'rd'
return value + 'th' | def function[num_to_ith, parameter[num]]:
constant[1 becomes 1st, 2 becomes 2nd, etc.]
variable[value] assign[=] call[name[str], parameter[name[num]]]
variable[before_last_digit] assign[=] call[name[value]][<ast.UnaryOp object at 0x7da212db4c70>]
variable[last_digit] assign[=] call[name[value]][<ast.UnaryOp object at 0x7da18f7202e0>]
if <ast.BoolOp object at 0x7da18f721f30> begin[:]
return[binary_operation[name[value] + constant[th]]]
if compare[name[last_digit] equal[==] constant[1]] begin[:]
return[binary_operation[name[value] + constant[st]]]
if compare[name[last_digit] equal[==] constant[2]] begin[:]
return[binary_operation[name[value] + constant[nd]]]
if compare[name[last_digit] equal[==] constant[3]] begin[:]
return[binary_operation[name[value] + constant[rd]]]
return[binary_operation[name[value] + constant[th]]] | keyword[def] identifier[num_to_ith] ( identifier[num] ):
literal[string]
identifier[value] = identifier[str] ( identifier[num] )
identifier[before_last_digit] = identifier[value] [- literal[int] ]
identifier[last_digit] = identifier[value] [- literal[int] ]
keyword[if] identifier[len] ( identifier[value] )> literal[int] keyword[and] identifier[before_last_digit] == literal[string] : keyword[return] identifier[value] + literal[string]
keyword[if] identifier[last_digit] == literal[string] : keyword[return] identifier[value] + literal[string]
keyword[if] identifier[last_digit] == literal[string] : keyword[return] identifier[value] + literal[string]
keyword[if] identifier[last_digit] == literal[string] : keyword[return] identifier[value] + literal[string]
keyword[return] identifier[value] + literal[string] | def num_to_ith(num):
"""1 becomes 1st, 2 becomes 2nd, etc."""
value = str(num)
before_last_digit = value[-2]
last_digit = value[-1]
if len(value) > 1 and before_last_digit == '1':
return value + 'th' # depends on [control=['if'], data=[]]
if last_digit == '1':
return value + 'st' # depends on [control=['if'], data=[]]
if last_digit == '2':
return value + 'nd' # depends on [control=['if'], data=[]]
if last_digit == '3':
return value + 'rd' # depends on [control=['if'], data=[]]
return value + 'th' |
def table2dicts(html):
"""
Converts a html table to a list of dictionaries, for example:
>>> table2dicts('''
... <table>
... <thead>
... <tr><th>a</th><th>b</th><th>c</th></tr>
... </thead>
... <tbody>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </tbody>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
It is also possibly to convert a html table with no thead / tbody,
in which case the first row is used as headers:
>>> table2dicts('''
... <table>
... <tr><th>a</th><th>b</th><th>c</th></tr>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
Similarly, when no th is present, the first row of td is used as
headers:
>>> table2dicts('''
... <table>
... <tr><td>a</td><td>b</td><td>c</td></tr>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
:param html: The html table to convert to a list of dictionaries.
:return: list of dictionaries with data from the html.
"""
def _get_headers_and_values(soup):
headers = [x.string or '' for x in soup.select('table tr th')]
if len(headers) == 0:
# maybe no th specified? just use the first row of td's
headers = [x.string or '' for x in soup.select('table tr')[0].select('td')]
values = [x or '' for x in soup.select('table tr')][1:]
return headers, values
soup = BeautifulSoup(html)
headers, values = _get_headers_and_values(soup)
result = [
OrderedDict([
(str(headers[i]), str(y.decode_contents()))
for i, y in enumerate(x.select('td'))
])
for x in values
]
return result | def function[table2dicts, parameter[html]]:
constant[
Converts a html table to a list of dictionaries, for example:
>>> table2dicts('''
... <table>
... <thead>
... <tr><th>a</th><th>b</th><th>c</th></tr>
... </thead>
... <tbody>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </tbody>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
It is also possibly to convert a html table with no thead / tbody,
in which case the first row is used as headers:
>>> table2dicts('''
... <table>
... <tr><th>a</th><th>b</th><th>c</th></tr>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
Similarly, when no th is present, the first row of td is used as
headers:
>>> table2dicts('''
... <table>
... <tr><td>a</td><td>b</td><td>c</td></tr>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
:param html: The html table to convert to a list of dictionaries.
:return: list of dictionaries with data from the html.
]
def function[_get_headers_and_values, parameter[soup]]:
variable[headers] assign[=] <ast.ListComp object at 0x7da20e9b1a80>
if compare[call[name[len], parameter[name[headers]]] equal[==] constant[0]] begin[:]
variable[headers] assign[=] <ast.ListComp object at 0x7da20e9b2bf0>
variable[values] assign[=] call[<ast.ListComp object at 0x7da18c4cf640>][<ast.Slice object at 0x7da18c4cf190>]
return[tuple[[<ast.Name object at 0x7da18c4cfb80>, <ast.Name object at 0x7da18c4cfb50>]]]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[html]]]
<ast.Tuple object at 0x7da18c4cf9d0> assign[=] call[name[_get_headers_and_values], parameter[name[soup]]]
variable[result] assign[=] <ast.ListComp object at 0x7da18c4cf160>
return[name[result]] | keyword[def] identifier[table2dicts] ( identifier[html] ):
literal[string]
keyword[def] identifier[_get_headers_and_values] ( identifier[soup] ):
identifier[headers] =[ identifier[x] . identifier[string] keyword[or] literal[string] keyword[for] identifier[x] keyword[in] identifier[soup] . identifier[select] ( literal[string] )]
keyword[if] identifier[len] ( identifier[headers] )== literal[int] :
identifier[headers] =[ identifier[x] . identifier[string] keyword[or] literal[string] keyword[for] identifier[x] keyword[in] identifier[soup] . identifier[select] ( literal[string] )[ literal[int] ]. identifier[select] ( literal[string] )]
identifier[values] =[ identifier[x] keyword[or] literal[string] keyword[for] identifier[x] keyword[in] identifier[soup] . identifier[select] ( literal[string] )][ literal[int] :]
keyword[return] identifier[headers] , identifier[values]
identifier[soup] = identifier[BeautifulSoup] ( identifier[html] )
identifier[headers] , identifier[values] = identifier[_get_headers_and_values] ( identifier[soup] )
identifier[result] =[
identifier[OrderedDict] ([
( identifier[str] ( identifier[headers] [ identifier[i] ]), identifier[str] ( identifier[y] . identifier[decode_contents] ()))
keyword[for] identifier[i] , identifier[y] keyword[in] identifier[enumerate] ( identifier[x] . identifier[select] ( literal[string] ))
])
keyword[for] identifier[x] keyword[in] identifier[values]
]
keyword[return] identifier[result] | def table2dicts(html):
"""
Converts a html table to a list of dictionaries, for example:
>>> table2dicts('''
... <table>
... <thead>
... <tr><th>a</th><th>b</th><th>c</th></tr>
... </thead>
... <tbody>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </tbody>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
It is also possibly to convert a html table with no thead / tbody,
in which case the first row is used as headers:
>>> table2dicts('''
... <table>
... <tr><th>a</th><th>b</th><th>c</th></tr>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
Similarly, when no th is present, the first row of td is used as
headers:
>>> table2dicts('''
... <table>
... <tr><td>a</td><td>b</td><td>c</td></tr>
... <tr><td>1</td><td>2</td><td>3</td></tr>
... <tr><td>4</td><td>5</td><td>6</td></tr>
... </table>
... ''')
[OrderedDict([('a', '1'), ('b', '2'), ('c', '3')]), OrderedDict([('a', '4'), ('b', '5'), ('c', '6')])]
:param html: The html table to convert to a list of dictionaries.
:return: list of dictionaries with data from the html.
"""
def _get_headers_and_values(soup):
headers = [x.string or '' for x in soup.select('table tr th')]
if len(headers) == 0:
# maybe no th specified? just use the first row of td's
headers = [x.string or '' for x in soup.select('table tr')[0].select('td')] # depends on [control=['if'], data=[]]
values = [x or '' for x in soup.select('table tr')][1:]
return (headers, values)
soup = BeautifulSoup(html)
(headers, values) = _get_headers_and_values(soup)
result = [OrderedDict([(str(headers[i]), str(y.decode_contents())) for (i, y) in enumerate(x.select('td'))]) for x in values]
return result |
def receive_nack(self, msg):
'''
Returns a new Prepare message if the number of Nacks received reaches
a quorum.
'''
self.observe_proposal(msg.promised_proposal_id)
if msg.proposal_id == self.proposal_id and self.nacks_received is not None:
self.nacks_received.add(msg.from_uid)
if len(self.nacks_received) == self.quorum_size:
return self.prepare() | def function[receive_nack, parameter[self, msg]]:
constant[
Returns a new Prepare message if the number of Nacks received reaches
a quorum.
]
call[name[self].observe_proposal, parameter[name[msg].promised_proposal_id]]
if <ast.BoolOp object at 0x7da1b16c2b60> begin[:]
call[name[self].nacks_received.add, parameter[name[msg].from_uid]]
if compare[call[name[len], parameter[name[self].nacks_received]] equal[==] name[self].quorum_size] begin[:]
return[call[name[self].prepare, parameter[]]] | keyword[def] identifier[receive_nack] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[self] . identifier[observe_proposal] ( identifier[msg] . identifier[promised_proposal_id] )
keyword[if] identifier[msg] . identifier[proposal_id] == identifier[self] . identifier[proposal_id] keyword[and] identifier[self] . identifier[nacks_received] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[nacks_received] . identifier[add] ( identifier[msg] . identifier[from_uid] )
keyword[if] identifier[len] ( identifier[self] . identifier[nacks_received] )== identifier[self] . identifier[quorum_size] :
keyword[return] identifier[self] . identifier[prepare] () | def receive_nack(self, msg):
"""
Returns a new Prepare message if the number of Nacks received reaches
a quorum.
"""
self.observe_proposal(msg.promised_proposal_id)
if msg.proposal_id == self.proposal_id and self.nacks_received is not None:
self.nacks_received.add(msg.from_uid)
if len(self.nacks_received) == self.quorum_size:
return self.prepare() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def is_hide(self, value, header=""):
"""Return True if the value is in the hide configuration list.
The hide configuration list is defined in the glances.conf file.
It is a comma separed list of regexp.
Example for diskio:
hide=sda2,sda5,loop.*
"""
# TODO: possible optimisation: create a re.compile list
return not all(j is None for j in [re.match(i, value.lower()) for i in self.get_conf_value('hide', header=header)]) | def function[is_hide, parameter[self, value, header]]:
constant[Return True if the value is in the hide configuration list.
The hide configuration list is defined in the glances.conf file.
It is a comma separed list of regexp.
Example for diskio:
hide=sda2,sda5,loop.*
]
return[<ast.UnaryOp object at 0x7da18dc07580>] | keyword[def] identifier[is_hide] ( identifier[self] , identifier[value] , identifier[header] = literal[string] ):
literal[string]
keyword[return] keyword[not] identifier[all] ( identifier[j] keyword[is] keyword[None] keyword[for] identifier[j] keyword[in] [ identifier[re] . identifier[match] ( identifier[i] , identifier[value] . identifier[lower] ()) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[get_conf_value] ( literal[string] , identifier[header] = identifier[header] )]) | def is_hide(self, value, header=''):
"""Return True if the value is in the hide configuration list.
The hide configuration list is defined in the glances.conf file.
It is a comma separed list of regexp.
Example for diskio:
hide=sda2,sda5,loop.*
"""
# TODO: possible optimisation: create a re.compile list
return not all((j is None for j in [re.match(i, value.lower()) for i in self.get_conf_value('hide', header=header)])) |
def __update(self, breakpoint_graph, merge_edges=False):
""" Updates a current :class`BreakpointGraph` object with information from a supplied :class`BreakpointGraph` instance.
Depending of a ``merge_edges`` flag, while updating of a current :class`BreakpointGraph` object is occuring, edges between similar vertices can be merged to already existing ones.
:param breakpoint_graph: a breakpoint graph to extract information from, which will be then added to the current
:type breakpoint_graph: :class`BreakpointGraph`
:param merge_edges: flag to indicate if edges to be added to current :class`BreakpointGraph` object are to be merged to already existing ones
:type merge_edges: ``Boolean``
:return: ``None``, performs inplace changes
"""
for bgedge in breakpoint_graph.edges():
self.__add_bgedge(bgedge=deepcopy(bgedge), merge=merge_edges) | def function[__update, parameter[self, breakpoint_graph, merge_edges]]:
constant[ Updates a current :class`BreakpointGraph` object with information from a supplied :class`BreakpointGraph` instance.
Depending of a ``merge_edges`` flag, while updating of a current :class`BreakpointGraph` object is occuring, edges between similar vertices can be merged to already existing ones.
:param breakpoint_graph: a breakpoint graph to extract information from, which will be then added to the current
:type breakpoint_graph: :class`BreakpointGraph`
:param merge_edges: flag to indicate if edges to be added to current :class`BreakpointGraph` object are to be merged to already existing ones
:type merge_edges: ``Boolean``
:return: ``None``, performs inplace changes
]
for taget[name[bgedge]] in starred[call[name[breakpoint_graph].edges, parameter[]]] begin[:]
call[name[self].__add_bgedge, parameter[]] | keyword[def] identifier[__update] ( identifier[self] , identifier[breakpoint_graph] , identifier[merge_edges] = keyword[False] ):
literal[string]
keyword[for] identifier[bgedge] keyword[in] identifier[breakpoint_graph] . identifier[edges] ():
identifier[self] . identifier[__add_bgedge] ( identifier[bgedge] = identifier[deepcopy] ( identifier[bgedge] ), identifier[merge] = identifier[merge_edges] ) | def __update(self, breakpoint_graph, merge_edges=False):
""" Updates a current :class`BreakpointGraph` object with information from a supplied :class`BreakpointGraph` instance.
Depending of a ``merge_edges`` flag, while updating of a current :class`BreakpointGraph` object is occuring, edges between similar vertices can be merged to already existing ones.
:param breakpoint_graph: a breakpoint graph to extract information from, which will be then added to the current
:type breakpoint_graph: :class`BreakpointGraph`
:param merge_edges: flag to indicate if edges to be added to current :class`BreakpointGraph` object are to be merged to already existing ones
:type merge_edges: ``Boolean``
:return: ``None``, performs inplace changes
"""
for bgedge in breakpoint_graph.edges():
self.__add_bgedge(bgedge=deepcopy(bgedge), merge=merge_edges) # depends on [control=['for'], data=['bgedge']] |
def count(self):
"Return a count of rows this Query would return."
return self.rpc_model.search_count(
self.domain, context=self.context
) | def function[count, parameter[self]]:
constant[Return a count of rows this Query would return.]
return[call[name[self].rpc_model.search_count, parameter[name[self].domain]]] | keyword[def] identifier[count] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[rpc_model] . identifier[search_count] (
identifier[self] . identifier[domain] , identifier[context] = identifier[self] . identifier[context]
) | def count(self):
"""Return a count of rows this Query would return."""
return self.rpc_model.search_count(self.domain, context=self.context) |
def hidden_item_tags(self):
""" Returns a list of tags which hide an item from the 'ls' output. """
hidden_item_tags = self.cp.get('ls', 'hidden_item_tags')
# pylint: disable=no-member
return [] if hidden_item_tags == '' else [tag.strip() for tag in
hidden_item_tags.split(',')] | def function[hidden_item_tags, parameter[self]]:
constant[ Returns a list of tags which hide an item from the 'ls' output. ]
variable[hidden_item_tags] assign[=] call[name[self].cp.get, parameter[constant[ls], constant[hidden_item_tags]]]
return[<ast.IfExp object at 0x7da20c7ca9e0>] | keyword[def] identifier[hidden_item_tags] ( identifier[self] ):
literal[string]
identifier[hidden_item_tags] = identifier[self] . identifier[cp] . identifier[get] ( literal[string] , literal[string] )
keyword[return] [] keyword[if] identifier[hidden_item_tags] == literal[string] keyword[else] [ identifier[tag] . identifier[strip] () keyword[for] identifier[tag] keyword[in]
identifier[hidden_item_tags] . identifier[split] ( literal[string] )] | def hidden_item_tags(self):
""" Returns a list of tags which hide an item from the 'ls' output. """
hidden_item_tags = self.cp.get('ls', 'hidden_item_tags')
# pylint: disable=no-member
return [] if hidden_item_tags == '' else [tag.strip() for tag in hidden_item_tags.split(',')] |
def _jwt_required(realm):
"""Does the actual work of verifying the JWT data in the current request.
This is done automatically for you by `jwt_required()` but you could call it manually.
Doing so would be useful in the context of optional JWT access in your APIs.
:param realm: an optional realm
"""
token = _jwt.request_callback()
if token is None:
raise JWTError('Authorization Required', 'Request does not contain an access token',
headers={'WWW-Authenticate': 'JWT realm="%s"' % realm})
try:
payload = _jwt.jwt_decode_callback(token)
except jwt.InvalidTokenError as e:
raise JWTError('Invalid token', str(e))
_request_ctx_stack.top.current_identity = identity = _jwt.identity_callback(payload)
if identity is None:
raise JWTError('Invalid JWT', 'User does not exist') | def function[_jwt_required, parameter[realm]]:
constant[Does the actual work of verifying the JWT data in the current request.
This is done automatically for you by `jwt_required()` but you could call it manually.
Doing so would be useful in the context of optional JWT access in your APIs.
:param realm: an optional realm
]
variable[token] assign[=] call[name[_jwt].request_callback, parameter[]]
if compare[name[token] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b11b91e0>
<ast.Try object at 0x7da1b11bbd90>
name[_request_ctx_stack].top.current_identity assign[=] call[name[_jwt].identity_callback, parameter[name[payload]]]
if compare[name[identity] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b11fe4a0> | keyword[def] identifier[_jwt_required] ( identifier[realm] ):
literal[string]
identifier[token] = identifier[_jwt] . identifier[request_callback] ()
keyword[if] identifier[token] keyword[is] keyword[None] :
keyword[raise] identifier[JWTError] ( literal[string] , literal[string] ,
identifier[headers] ={ literal[string] : literal[string] % identifier[realm] })
keyword[try] :
identifier[payload] = identifier[_jwt] . identifier[jwt_decode_callback] ( identifier[token] )
keyword[except] identifier[jwt] . identifier[InvalidTokenError] keyword[as] identifier[e] :
keyword[raise] identifier[JWTError] ( literal[string] , identifier[str] ( identifier[e] ))
identifier[_request_ctx_stack] . identifier[top] . identifier[current_identity] = identifier[identity] = identifier[_jwt] . identifier[identity_callback] ( identifier[payload] )
keyword[if] identifier[identity] keyword[is] keyword[None] :
keyword[raise] identifier[JWTError] ( literal[string] , literal[string] ) | def _jwt_required(realm):
"""Does the actual work of verifying the JWT data in the current request.
This is done automatically for you by `jwt_required()` but you could call it manually.
Doing so would be useful in the context of optional JWT access in your APIs.
:param realm: an optional realm
"""
token = _jwt.request_callback()
if token is None:
raise JWTError('Authorization Required', 'Request does not contain an access token', headers={'WWW-Authenticate': 'JWT realm="%s"' % realm}) # depends on [control=['if'], data=[]]
try:
payload = _jwt.jwt_decode_callback(token) # depends on [control=['try'], data=[]]
except jwt.InvalidTokenError as e:
raise JWTError('Invalid token', str(e)) # depends on [control=['except'], data=['e']]
_request_ctx_stack.top.current_identity = identity = _jwt.identity_callback(payload)
if identity is None:
raise JWTError('Invalid JWT', 'User does not exist') # depends on [control=['if'], data=[]] |
def set_defaults(defaults, kwargs):
"""
Set defaults from defaults dict to kwargs dict
:param defaults:
:type defaults:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
for key, value in defaults.items():
if key not in kwargs and value is not None:
kwargs[key] = value
elif isinstance(value, list) and isinstance(kwargs[key], list):
kwargs[key] = list(value) + kwargs[key]
elif isinstance(value, dict) and isinstance(kwargs[key], dict):
set_defaults(value, kwargs[key])
elif key in kwargs and value is None:
kwargs[key] = None | def function[set_defaults, parameter[defaults, kwargs]]:
constant[
Set defaults from defaults dict to kwargs dict
:param defaults:
:type defaults:
:param kwargs:
:type kwargs:
:return:
:rtype:
]
for taget[tuple[[<ast.Name object at 0x7da1b26acac0>, <ast.Name object at 0x7da1b26ad540>]]] in starred[call[name[defaults].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b26adcf0> begin[:]
call[name[kwargs]][name[key]] assign[=] name[value] | keyword[def] identifier[set_defaults] ( identifier[defaults] , identifier[kwargs] ):
literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[defaults] . identifier[items] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[kwargs] keyword[and] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ identifier[key] ]= identifier[value]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[list] ) keyword[and] identifier[isinstance] ( identifier[kwargs] [ identifier[key] ], identifier[list] ):
identifier[kwargs] [ identifier[key] ]= identifier[list] ( identifier[value] )+ identifier[kwargs] [ identifier[key] ]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[and] identifier[isinstance] ( identifier[kwargs] [ identifier[key] ], identifier[dict] ):
identifier[set_defaults] ( identifier[value] , identifier[kwargs] [ identifier[key] ])
keyword[elif] identifier[key] keyword[in] identifier[kwargs] keyword[and] identifier[value] keyword[is] keyword[None] :
identifier[kwargs] [ identifier[key] ]= keyword[None] | def set_defaults(defaults, kwargs):
"""
Set defaults from defaults dict to kwargs dict
:param defaults:
:type defaults:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
for (key, value) in defaults.items():
if key not in kwargs and value is not None:
kwargs[key] = value # depends on [control=['if'], data=[]]
elif isinstance(value, list) and isinstance(kwargs[key], list):
kwargs[key] = list(value) + kwargs[key] # depends on [control=['if'], data=[]]
elif isinstance(value, dict) and isinstance(kwargs[key], dict):
set_defaults(value, kwargs[key]) # depends on [control=['if'], data=[]]
elif key in kwargs and value is None:
kwargs[key] = None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def read_namespaced_persistent_volume_claim(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_persistent_volume_claim # noqa: E501
read the specified PersistentVolumeClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_persistent_volume_claim(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PersistentVolumeClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1PersistentVolumeClaim
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_persistent_volume_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.read_namespaced_persistent_volume_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | def function[read_namespaced_persistent_volume_claim, parameter[self, name, namespace]]:
constant[read_namespaced_persistent_volume_claim # noqa: E501
read the specified PersistentVolumeClaim # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_persistent_volume_claim(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PersistentVolumeClaim (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1PersistentVolumeClaim
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].read_namespaced_persistent_volume_claim_with_http_info, parameter[name[name], name[namespace]]]] | keyword[def] identifier[read_namespaced_persistent_volume_claim] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[read_namespaced_persistent_volume_claim_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[read_namespaced_persistent_volume_claim_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def read_namespaced_persistent_volume_claim(self, name, namespace, **kwargs): # noqa: E501
"read_namespaced_persistent_volume_claim # noqa: E501\n\n read the specified PersistentVolumeClaim # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.read_namespaced_persistent_volume_claim(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the PersistentVolumeClaim (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.\n :param bool export: Should this value be exported. Export strips fields that a user can not specify.\n :return: V1PersistentVolumeClaim\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_persistent_volume_claim_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.read_namespaced_persistent_volume_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
def main_loop():
'''main processing loop, display graphs and maps'''
while True:
if mestate is None or mestate.exit:
return
while not mestate.input_queue.empty():
line = mestate.input_queue.get()
cmds = line.split(';')
for c in cmds:
process_stdin(c)
time.sleep(0.1) | def function[main_loop, parameter[]]:
constant[main processing loop, display graphs and maps]
while constant[True] begin[:]
if <ast.BoolOp object at 0x7da1b1687c70> begin[:]
return[None]
while <ast.UnaryOp object at 0x7da1b1684b80> begin[:]
variable[line] assign[=] call[name[mestate].input_queue.get, parameter[]]
variable[cmds] assign[=] call[name[line].split, parameter[constant[;]]]
for taget[name[c]] in starred[name[cmds]] begin[:]
call[name[process_stdin], parameter[name[c]]]
call[name[time].sleep, parameter[constant[0.1]]] | keyword[def] identifier[main_loop] ():
literal[string]
keyword[while] keyword[True] :
keyword[if] identifier[mestate] keyword[is] keyword[None] keyword[or] identifier[mestate] . identifier[exit] :
keyword[return]
keyword[while] keyword[not] identifier[mestate] . identifier[input_queue] . identifier[empty] ():
identifier[line] = identifier[mestate] . identifier[input_queue] . identifier[get] ()
identifier[cmds] = identifier[line] . identifier[split] ( literal[string] )
keyword[for] identifier[c] keyword[in] identifier[cmds] :
identifier[process_stdin] ( identifier[c] )
identifier[time] . identifier[sleep] ( literal[int] ) | def main_loop():
"""main processing loop, display graphs and maps"""
while True:
if mestate is None or mestate.exit:
return # depends on [control=['if'], data=[]]
while not mestate.input_queue.empty():
line = mestate.input_queue.get()
cmds = line.split(';')
for c in cmds:
process_stdin(c) # depends on [control=['for'], data=['c']] # depends on [control=['while'], data=[]]
time.sleep(0.1) # depends on [control=['while'], data=[]] |
def add_child(self, child, name=None, index=None):
"""Add a child."""
if name is None:
name = child.get_name()
if index is None:
self._children[name] = child
else:
items = [item for item in self._children.items()
if item[0] != name]
items.insert(int(index), (name, child))
self._children = OrderedDict(items)
child._parent = self
return self | def function[add_child, parameter[self, child, name, index]]:
constant[Add a child.]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] call[name[child].get_name, parameter[]]
if compare[name[index] is constant[None]] begin[:]
call[name[self]._children][name[name]] assign[=] name[child]
name[child]._parent assign[=] name[self]
return[name[self]] | keyword[def] identifier[add_child] ( identifier[self] , identifier[child] , identifier[name] = keyword[None] , identifier[index] = keyword[None] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[child] . identifier[get_name] ()
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[self] . identifier[_children] [ identifier[name] ]= identifier[child]
keyword[else] :
identifier[items] =[ identifier[item] keyword[for] identifier[item] keyword[in] identifier[self] . identifier[_children] . identifier[items] ()
keyword[if] identifier[item] [ literal[int] ]!= identifier[name] ]
identifier[items] . identifier[insert] ( identifier[int] ( identifier[index] ),( identifier[name] , identifier[child] ))
identifier[self] . identifier[_children] = identifier[OrderedDict] ( identifier[items] )
identifier[child] . identifier[_parent] = identifier[self]
keyword[return] identifier[self] | def add_child(self, child, name=None, index=None):
"""Add a child."""
if name is None:
name = child.get_name() # depends on [control=['if'], data=['name']]
if index is None:
self._children[name] = child # depends on [control=['if'], data=[]]
else:
items = [item for item in self._children.items() if item[0] != name]
items.insert(int(index), (name, child))
self._children = OrderedDict(items)
child._parent = self
return self |
def get_usage(self):
"""
parses /proc/stat and calcualtes total and busy time
(more specific USER_HZ see man 5 proc for further informations )
"""
usage = {}
for cpu, timings in self.get_cpu_timings().items():
cpu_total = sum(timings)
del timings[3:5]
cpu_busy = sum(timings)
cpu_usage = self.calculate_usage(cpu, cpu_total, cpu_busy)
usage['usage_' + cpu] = cpu_usage
# for backward compatibility
usage['usage'] = usage['usage_cpu']
return usage | def function[get_usage, parameter[self]]:
constant[
parses /proc/stat and calcualtes total and busy time
(more specific USER_HZ see man 5 proc for further informations )
]
variable[usage] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2047eb4f0>, <ast.Name object at 0x7da2047eabf0>]]] in starred[call[call[name[self].get_cpu_timings, parameter[]].items, parameter[]]] begin[:]
variable[cpu_total] assign[=] call[name[sum], parameter[name[timings]]]
<ast.Delete object at 0x7da2047ead40>
variable[cpu_busy] assign[=] call[name[sum], parameter[name[timings]]]
variable[cpu_usage] assign[=] call[name[self].calculate_usage, parameter[name[cpu], name[cpu_total], name[cpu_busy]]]
call[name[usage]][binary_operation[constant[usage_] + name[cpu]]] assign[=] name[cpu_usage]
call[name[usage]][constant[usage]] assign[=] call[name[usage]][constant[usage_cpu]]
return[name[usage]] | keyword[def] identifier[get_usage] ( identifier[self] ):
literal[string]
identifier[usage] ={}
keyword[for] identifier[cpu] , identifier[timings] keyword[in] identifier[self] . identifier[get_cpu_timings] (). identifier[items] ():
identifier[cpu_total] = identifier[sum] ( identifier[timings] )
keyword[del] identifier[timings] [ literal[int] : literal[int] ]
identifier[cpu_busy] = identifier[sum] ( identifier[timings] )
identifier[cpu_usage] = identifier[self] . identifier[calculate_usage] ( identifier[cpu] , identifier[cpu_total] , identifier[cpu_busy] )
identifier[usage] [ literal[string] + identifier[cpu] ]= identifier[cpu_usage]
identifier[usage] [ literal[string] ]= identifier[usage] [ literal[string] ]
keyword[return] identifier[usage] | def get_usage(self):
"""
parses /proc/stat and calcualtes total and busy time
(more specific USER_HZ see man 5 proc for further informations )
"""
usage = {}
for (cpu, timings) in self.get_cpu_timings().items():
cpu_total = sum(timings)
del timings[3:5]
cpu_busy = sum(timings)
cpu_usage = self.calculate_usage(cpu, cpu_total, cpu_busy)
usage['usage_' + cpu] = cpu_usage # depends on [control=['for'], data=[]]
# for backward compatibility
usage['usage'] = usage['usage_cpu']
return usage |
def validate(message, **config):
""" Return true or false if the message is signed appropriately. """
if not _validate_implementations:
init(**config)
cfg = copy.deepcopy(config)
if 'gpg_home' not in cfg:
cfg['gpg_home'] = os.path.expanduser('~/.gnupg/')
if 'ssldir' not in cfg:
cfg['ssldir'] = '/etc/pki/fedmsg'
if 'crypto' in message:
if not message['crypto'] in _possible_backends:
log.warn("Message specified an impossible crypto backend")
return False
try:
backend = _possible_backends[message['crypto']]
except Exception as e:
log.warn("Failed to load %r %r" % (message['crypto'], e))
return False
# fedmsg 0.7.2 and earlier did not specify which crypto backend a message
# was signed with. As long as we care about interoperability with those
# versions, attempt to guess the backend to use
elif 'certificate' in message:
backend = x509
elif 'signature' in message:
backend = gpg
else:
log.warn('Could not determine crypto backend. Message unsigned?')
return False
if backend in _validate_implementations:
return backend.validate(message, **cfg)
else:
log.warn("Crypto backend %r is disallowed" % backend)
return False | def function[validate, parameter[message]]:
constant[ Return true or false if the message is signed appropriately. ]
if <ast.UnaryOp object at 0x7da18bc72860> begin[:]
call[name[init], parameter[]]
variable[cfg] assign[=] call[name[copy].deepcopy, parameter[name[config]]]
if compare[constant[gpg_home] <ast.NotIn object at 0x7da2590d7190> name[cfg]] begin[:]
call[name[cfg]][constant[gpg_home]] assign[=] call[name[os].path.expanduser, parameter[constant[~/.gnupg/]]]
if compare[constant[ssldir] <ast.NotIn object at 0x7da2590d7190> name[cfg]] begin[:]
call[name[cfg]][constant[ssldir]] assign[=] constant[/etc/pki/fedmsg]
if compare[constant[crypto] in name[message]] begin[:]
if <ast.UnaryOp object at 0x7da1b0619390> begin[:]
call[name[log].warn, parameter[constant[Message specified an impossible crypto backend]]]
return[constant[False]]
<ast.Try object at 0x7da1b061b9d0>
if compare[name[backend] in name[_validate_implementations]] begin[:]
return[call[name[backend].validate, parameter[name[message]]]] | keyword[def] identifier[validate] ( identifier[message] ,** identifier[config] ):
literal[string]
keyword[if] keyword[not] identifier[_validate_implementations] :
identifier[init] (** identifier[config] )
identifier[cfg] = identifier[copy] . identifier[deepcopy] ( identifier[config] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[cfg] :
identifier[cfg] [ literal[string] ]= identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[cfg] :
identifier[cfg] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[in] identifier[message] :
keyword[if] keyword[not] identifier[message] [ literal[string] ] keyword[in] identifier[_possible_backends] :
identifier[log] . identifier[warn] ( literal[string] )
keyword[return] keyword[False]
keyword[try] :
identifier[backend] = identifier[_possible_backends] [ identifier[message] [ literal[string] ]]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[log] . identifier[warn] ( literal[string] %( identifier[message] [ literal[string] ], identifier[e] ))
keyword[return] keyword[False]
keyword[elif] literal[string] keyword[in] identifier[message] :
identifier[backend] = identifier[x509]
keyword[elif] literal[string] keyword[in] identifier[message] :
identifier[backend] = identifier[gpg]
keyword[else] :
identifier[log] . identifier[warn] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[backend] keyword[in] identifier[_validate_implementations] :
keyword[return] identifier[backend] . identifier[validate] ( identifier[message] ,** identifier[cfg] )
keyword[else] :
identifier[log] . identifier[warn] ( literal[string] % identifier[backend] )
keyword[return] keyword[False] | def validate(message, **config):
""" Return true or false if the message is signed appropriately. """
if not _validate_implementations:
init(**config) # depends on [control=['if'], data=[]]
cfg = copy.deepcopy(config)
if 'gpg_home' not in cfg:
cfg['gpg_home'] = os.path.expanduser('~/.gnupg/') # depends on [control=['if'], data=['cfg']]
if 'ssldir' not in cfg:
cfg['ssldir'] = '/etc/pki/fedmsg' # depends on [control=['if'], data=['cfg']]
if 'crypto' in message:
if not message['crypto'] in _possible_backends:
log.warn('Message specified an impossible crypto backend')
return False # depends on [control=['if'], data=[]]
try:
backend = _possible_backends[message['crypto']] # depends on [control=['try'], data=[]]
except Exception as e:
log.warn('Failed to load %r %r' % (message['crypto'], e))
return False # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['message']]
# fedmsg 0.7.2 and earlier did not specify which crypto backend a message
# was signed with. As long as we care about interoperability with those
# versions, attempt to guess the backend to use
elif 'certificate' in message:
backend = x509 # depends on [control=['if'], data=[]]
elif 'signature' in message:
backend = gpg # depends on [control=['if'], data=[]]
else:
log.warn('Could not determine crypto backend. Message unsigned?')
return False
if backend in _validate_implementations:
return backend.validate(message, **cfg) # depends on [control=['if'], data=['backend']]
else:
log.warn('Crypto backend %r is disallowed' % backend)
return False |
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn}) | def function[pipfaster_download_cacher, parameter[index_urls]]:
constant[vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
]
from relative_module[pip._internal] import module[download]
variable[orig] assign[=] name[download]._download_http_url
variable[patched_fn] assign[=] call[name[get_patched_download_http_url], parameter[name[orig], name[index_urls]]]
return[call[name[patched], parameter[call[name[vars], parameter[name[download]]], dictionary[[<ast.Constant object at 0x7da1b2345840>], [<ast.Name object at 0x7da18fe90550>]]]]] | keyword[def] identifier[pipfaster_download_cacher] ( identifier[index_urls] ):
literal[string]
keyword[from] identifier[pip] . identifier[_internal] keyword[import] identifier[download]
identifier[orig] = identifier[download] . identifier[_download_http_url]
identifier[patched_fn] = identifier[get_patched_download_http_url] ( identifier[orig] , identifier[index_urls] )
keyword[return] identifier[patched] ( identifier[vars] ( identifier[download] ),{ literal[string] : identifier[patched_fn] }) | def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn}) |
def cast_value(self, value, constraints=True):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
# Null value
if value in self.__missing_values:
value = None
# Cast value
cast_value = value
if value is not None:
cast_value = self.__cast_function(value)
if cast_value == config.ERROR:
raise exceptions.CastError((
'Field "{field.name}" can\'t cast value "{value}" '
'for type "{field.type}" with format "{field.format}"'
).format(field=self, value=value))
# Check value
if constraints:
for name, check in self.__check_functions.items():
if isinstance(constraints, list):
if name not in constraints:
continue
passed = check(cast_value)
if not passed:
raise exceptions.CastError((
'Field "{field.name}" has constraint "{name}" '
'which is not satisfied for value "{value}"'
).format(field=self, name=name, value=value))
return cast_value | def function[cast_value, parameter[self, value, constraints]]:
constant[https://github.com/frictionlessdata/tableschema-py#field
]
if compare[name[value] in name[self].__missing_values] begin[:]
variable[value] assign[=] constant[None]
variable[cast_value] assign[=] name[value]
if compare[name[value] is_not constant[None]] begin[:]
variable[cast_value] assign[=] call[name[self].__cast_function, parameter[name[value]]]
if compare[name[cast_value] equal[==] name[config].ERROR] begin[:]
<ast.Raise object at 0x7da18dc07580>
if name[constraints] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2044c2170>, <ast.Name object at 0x7da2044c0e80>]]] in starred[call[name[self].__check_functions.items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[constraints], name[list]]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[constraints]] begin[:]
continue
variable[passed] assign[=] call[name[check], parameter[name[cast_value]]]
if <ast.UnaryOp object at 0x7da2044c3be0> begin[:]
<ast.Raise object at 0x7da2044c17e0>
return[name[cast_value]] | keyword[def] identifier[cast_value] ( identifier[self] , identifier[value] , identifier[constraints] = keyword[True] ):
literal[string]
keyword[if] identifier[value] keyword[in] identifier[self] . identifier[__missing_values] :
identifier[value] = keyword[None]
identifier[cast_value] = identifier[value]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[cast_value] = identifier[self] . identifier[__cast_function] ( identifier[value] )
keyword[if] identifier[cast_value] == identifier[config] . identifier[ERROR] :
keyword[raise] identifier[exceptions] . identifier[CastError] ((
literal[string]
literal[string]
). identifier[format] ( identifier[field] = identifier[self] , identifier[value] = identifier[value] ))
keyword[if] identifier[constraints] :
keyword[for] identifier[name] , identifier[check] keyword[in] identifier[self] . identifier[__check_functions] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[constraints] , identifier[list] ):
keyword[if] identifier[name] keyword[not] keyword[in] identifier[constraints] :
keyword[continue]
identifier[passed] = identifier[check] ( identifier[cast_value] )
keyword[if] keyword[not] identifier[passed] :
keyword[raise] identifier[exceptions] . identifier[CastError] ((
literal[string]
literal[string]
). identifier[format] ( identifier[field] = identifier[self] , identifier[name] = identifier[name] , identifier[value] = identifier[value] ))
keyword[return] identifier[cast_value] | def cast_value(self, value, constraints=True):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
# Null value
if value in self.__missing_values:
value = None # depends on [control=['if'], data=['value']]
# Cast value
cast_value = value
if value is not None:
cast_value = self.__cast_function(value)
if cast_value == config.ERROR:
raise exceptions.CastError('Field "{field.name}" can\'t cast value "{value}" for type "{field.type}" with format "{field.format}"'.format(field=self, value=value)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value']]
# Check value
if constraints:
for (name, check) in self.__check_functions.items():
if isinstance(constraints, list):
if name not in constraints:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
passed = check(cast_value)
if not passed:
raise exceptions.CastError('Field "{field.name}" has constraint "{name}" which is not satisfied for value "{value}"'.format(field=self, name=name, value=value)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return cast_value |
def could_scope_out(self):
"""
could bubble up from current scope
:return:
"""
return not self.waiting_for or \
isinstance(self.waiting_for, callable.EndOfStory) or \
self.is_breaking_a_loop() | def function[could_scope_out, parameter[self]]:
constant[
could bubble up from current scope
:return:
]
return[<ast.BoolOp object at 0x7da1b28fd090>] | keyword[def] identifier[could_scope_out] ( identifier[self] ):
literal[string]
keyword[return] keyword[not] identifier[self] . identifier[waiting_for] keyword[or] identifier[isinstance] ( identifier[self] . identifier[waiting_for] , identifier[callable] . identifier[EndOfStory] ) keyword[or] identifier[self] . identifier[is_breaking_a_loop] () | def could_scope_out(self):
"""
could bubble up from current scope
:return:
"""
return not self.waiting_for or isinstance(self.waiting_for, callable.EndOfStory) or self.is_breaking_a_loop() |
def compare_vals(cls, sort=True):
"""Return this class's attribute values (those not stating with '_'),
but only for attributes with `compare` set to `True`.
Returns
-------
_compare_vals : list of objects
List of values of internal attributes to use when comparing
`CatDict` objects. Order sorted by `Key` priority, followed by
alphabetical.
"""
if cls._compare_vals:
return cls._compare_vals
# If `_compare_vals` is not yet defined, create it
# ----------------------------------------
_compare_vals = []
# get the keys from all base-classes aswell (when this is subclasses)
for mro in cls.__bases__:
# base classes below `KeyCollection` (e.g. `object`) wont work
if issubclass(mro, KeyCollection):
_compare_vals.extend(mro.compare_vals(sort=False))
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_compare_vals.extend([
vv for kk, vv in vars(cls).items()
if (not kk.startswith('_') and not callable(getattr(cls, kk)) and
vv.compare)
])
# Sort keys based on priority, high priority values first
if sort:
_compare_vals = sorted(
_compare_vals,
reverse=True,
key=lambda key: (key.priority, key.name))
# Store for future retrieval
cls._compare_vals = _compare_vals
return cls._compare_vals | def function[compare_vals, parameter[cls, sort]]:
constant[Return this class's attribute values (those not stating with '_'),
but only for attributes with `compare` set to `True`.
Returns
-------
_compare_vals : list of objects
List of values of internal attributes to use when comparing
`CatDict` objects. Order sorted by `Key` priority, followed by
alphabetical.
]
if name[cls]._compare_vals begin[:]
return[name[cls]._compare_vals]
variable[_compare_vals] assign[=] list[[]]
for taget[name[mro]] in starred[name[cls].__bases__] begin[:]
if call[name[issubclass], parameter[name[mro], name[KeyCollection]]] begin[:]
call[name[_compare_vals].extend, parameter[call[name[mro].compare_vals, parameter[]]]]
call[name[_compare_vals].extend, parameter[<ast.ListComp object at 0x7da1b0fc4d60>]]
if name[sort] begin[:]
variable[_compare_vals] assign[=] call[name[sorted], parameter[name[_compare_vals]]]
name[cls]._compare_vals assign[=] name[_compare_vals]
return[name[cls]._compare_vals] | keyword[def] identifier[compare_vals] ( identifier[cls] , identifier[sort] = keyword[True] ):
literal[string]
keyword[if] identifier[cls] . identifier[_compare_vals] :
keyword[return] identifier[cls] . identifier[_compare_vals]
identifier[_compare_vals] =[]
keyword[for] identifier[mro] keyword[in] identifier[cls] . identifier[__bases__] :
keyword[if] identifier[issubclass] ( identifier[mro] , identifier[KeyCollection] ):
identifier[_compare_vals] . identifier[extend] ( identifier[mro] . identifier[compare_vals] ( identifier[sort] = keyword[False] ))
identifier[_compare_vals] . identifier[extend] ([
identifier[vv] keyword[for] identifier[kk] , identifier[vv] keyword[in] identifier[vars] ( identifier[cls] ). identifier[items] ()
keyword[if] ( keyword[not] identifier[kk] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[callable] ( identifier[getattr] ( identifier[cls] , identifier[kk] )) keyword[and]
identifier[vv] . identifier[compare] )
])
keyword[if] identifier[sort] :
identifier[_compare_vals] = identifier[sorted] (
identifier[_compare_vals] ,
identifier[reverse] = keyword[True] ,
identifier[key] = keyword[lambda] identifier[key] :( identifier[key] . identifier[priority] , identifier[key] . identifier[name] ))
identifier[cls] . identifier[_compare_vals] = identifier[_compare_vals]
keyword[return] identifier[cls] . identifier[_compare_vals] | def compare_vals(cls, sort=True):
"""Return this class's attribute values (those not stating with '_'),
but only for attributes with `compare` set to `True`.
Returns
-------
_compare_vals : list of objects
List of values of internal attributes to use when comparing
`CatDict` objects. Order sorted by `Key` priority, followed by
alphabetical.
"""
if cls._compare_vals:
return cls._compare_vals # depends on [control=['if'], data=[]]
# If `_compare_vals` is not yet defined, create it
# ----------------------------------------
_compare_vals = []
# get the keys from all base-classes aswell (when this is subclasses)
for mro in cls.__bases__:
# base classes below `KeyCollection` (e.g. `object`) wont work
if issubclass(mro, KeyCollection):
_compare_vals.extend(mro.compare_vals(sort=False)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mro']]
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_compare_vals.extend([vv for (kk, vv) in vars(cls).items() if not kk.startswith('_') and (not callable(getattr(cls, kk))) and vv.compare])
# Sort keys based on priority, high priority values first
if sort:
_compare_vals = sorted(_compare_vals, reverse=True, key=lambda key: (key.priority, key.name)) # depends on [control=['if'], data=[]]
# Store for future retrieval
cls._compare_vals = _compare_vals
return cls._compare_vals |
def lookup_char(self, char):
"""Return character information from datasets.
Parameters
----------
char : str
character / string to lookup
Returns
-------
:class:`sqlalchemy.orm.query.Query` :
list of matches
"""
Unihan = self.sql.base.classes.Unihan
return self.sql.session.query(Unihan).filter_by(char=char) | def function[lookup_char, parameter[self, char]]:
constant[Return character information from datasets.
Parameters
----------
char : str
character / string to lookup
Returns
-------
:class:`sqlalchemy.orm.query.Query` :
list of matches
]
variable[Unihan] assign[=] name[self].sql.base.classes.Unihan
return[call[call[name[self].sql.session.query, parameter[name[Unihan]]].filter_by, parameter[]]] | keyword[def] identifier[lookup_char] ( identifier[self] , identifier[char] ):
literal[string]
identifier[Unihan] = identifier[self] . identifier[sql] . identifier[base] . identifier[classes] . identifier[Unihan]
keyword[return] identifier[self] . identifier[sql] . identifier[session] . identifier[query] ( identifier[Unihan] ). identifier[filter_by] ( identifier[char] = identifier[char] ) | def lookup_char(self, char):
"""Return character information from datasets.
Parameters
----------
char : str
character / string to lookup
Returns
-------
:class:`sqlalchemy.orm.query.Query` :
list of matches
"""
Unihan = self.sql.base.classes.Unihan
return self.sql.session.query(Unihan).filter_by(char=char) |
def dropbox_fileupload(dropbox, request):
""" accepts a single file upload and adds it to the dropbox as attachment"""
attachment = request.POST['attachment']
attached = dropbox.add_attachment(attachment)
return dict(
files=[dict(
name=attached,
type=attachment.type,
)]
) | def function[dropbox_fileupload, parameter[dropbox, request]]:
constant[ accepts a single file upload and adds it to the dropbox as attachment]
variable[attachment] assign[=] call[name[request].POST][constant[attachment]]
variable[attached] assign[=] call[name[dropbox].add_attachment, parameter[name[attachment]]]
return[call[name[dict], parameter[]]] | keyword[def] identifier[dropbox_fileupload] ( identifier[dropbox] , identifier[request] ):
literal[string]
identifier[attachment] = identifier[request] . identifier[POST] [ literal[string] ]
identifier[attached] = identifier[dropbox] . identifier[add_attachment] ( identifier[attachment] )
keyword[return] identifier[dict] (
identifier[files] =[ identifier[dict] (
identifier[name] = identifier[attached] ,
identifier[type] = identifier[attachment] . identifier[type] ,
)]
) | def dropbox_fileupload(dropbox, request):
""" accepts a single file upload and adds it to the dropbox as attachment"""
attachment = request.POST['attachment']
attached = dropbox.add_attachment(attachment)
return dict(files=[dict(name=attached, type=attachment.type)]) |
def hexdigest(self):
"""Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
"""
d = map(None, self.digest())
d = map(ord, d)
d = map(lambda x:"%02x" % x, d)
d = string.join(d, '')
return d | def function[hexdigest, parameter[self]]:
constant[Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
]
variable[d] assign[=] call[name[map], parameter[constant[None], call[name[self].digest, parameter[]]]]
variable[d] assign[=] call[name[map], parameter[name[ord], name[d]]]
variable[d] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da18dc07970>, name[d]]]
variable[d] assign[=] call[name[string].join, parameter[name[d], constant[]]]
return[name[d]] | keyword[def] identifier[hexdigest] ( identifier[self] ):
literal[string]
identifier[d] = identifier[map] ( keyword[None] , identifier[self] . identifier[digest] ())
identifier[d] = identifier[map] ( identifier[ord] , identifier[d] )
identifier[d] = identifier[map] ( keyword[lambda] identifier[x] : literal[string] % identifier[x] , identifier[d] )
identifier[d] = identifier[string] . identifier[join] ( identifier[d] , literal[string] )
keyword[return] identifier[d] | def hexdigest(self):
"""Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
"""
d = map(None, self.digest())
d = map(ord, d)
d = map(lambda x: '%02x' % x, d)
d = string.join(d, '')
return d |
def reset_parameter(**kwargs):
"""Create a callback that resets the parameter after the first iteration.
Note
----
The initial parameter will still take in-effect on first iteration.
Parameters
----------
**kwargs : value should be list or function
List of parameters for each boosting round
or a customized function that calculates the parameter in terms of
current number of round (e.g. yields learning rate decay).
If list lst, parameter = lst[current_round].
If function func, parameter = func(current_round).
Returns
-------
callback : function
The callback that resets the parameter after the first iteration.
"""
def _callback(env):
new_parameters = {}
for key, value in kwargs.items():
if key in ['num_class', 'num_classes',
'boosting', 'boost', 'boosting_type',
'metric', 'metrics', 'metric_types']:
raise RuntimeError("cannot reset {} during training".format(repr(key)))
if isinstance(value, list):
if len(value) != env.end_iteration - env.begin_iteration:
raise ValueError("Length of list {} has to equal to 'num_boost_round'."
.format(repr(key)))
new_param = value[env.iteration - env.begin_iteration]
else:
new_param = value(env.iteration - env.begin_iteration)
if new_param != env.params.get(key, None):
new_parameters[key] = new_param
if new_parameters:
env.model.reset_parameter(new_parameters)
env.params.update(new_parameters)
_callback.before_iteration = True
_callback.order = 10
return _callback | def function[reset_parameter, parameter[]]:
constant[Create a callback that resets the parameter after the first iteration.
Note
----
The initial parameter will still take in-effect on first iteration.
Parameters
----------
**kwargs : value should be list or function
List of parameters for each boosting round
or a customized function that calculates the parameter in terms of
current number of round (e.g. yields learning rate decay).
If list lst, parameter = lst[current_round].
If function func, parameter = func(current_round).
Returns
-------
callback : function
The callback that resets the parameter after the first iteration.
]
def function[_callback, parameter[env]]:
variable[new_parameters] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204344a00>, <ast.Name object at 0x7da2043477c0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if compare[name[key] in list[[<ast.Constant object at 0x7da204344940>, <ast.Constant object at 0x7da204345ff0>, <ast.Constant object at 0x7da204344040>, <ast.Constant object at 0x7da204346620>, <ast.Constant object at 0x7da204346950>, <ast.Constant object at 0x7da204345180>, <ast.Constant object at 0x7da2043468f0>, <ast.Constant object at 0x7da204346f80>]]] begin[:]
<ast.Raise object at 0x7da204346bf0>
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
if compare[call[name[len], parameter[name[value]]] not_equal[!=] binary_operation[name[env].end_iteration - name[env].begin_iteration]] begin[:]
<ast.Raise object at 0x7da2041d83a0>
variable[new_param] assign[=] call[name[value]][binary_operation[name[env].iteration - name[env].begin_iteration]]
if compare[name[new_param] not_equal[!=] call[name[env].params.get, parameter[name[key], constant[None]]]] begin[:]
call[name[new_parameters]][name[key]] assign[=] name[new_param]
if name[new_parameters] begin[:]
call[name[env].model.reset_parameter, parameter[name[new_parameters]]]
call[name[env].params.update, parameter[name[new_parameters]]]
name[_callback].before_iteration assign[=] constant[True]
name[_callback].order assign[=] constant[10]
return[name[_callback]] | keyword[def] identifier[reset_parameter] (** identifier[kwargs] ):
literal[string]
keyword[def] identifier[_callback] ( identifier[env] ):
identifier[new_parameters] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[key] keyword[in] [ literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]:
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[key] )))
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[if] identifier[len] ( identifier[value] )!= identifier[env] . identifier[end_iteration] - identifier[env] . identifier[begin_iteration] :
keyword[raise] identifier[ValueError] ( literal[string]
. identifier[format] ( identifier[repr] ( identifier[key] )))
identifier[new_param] = identifier[value] [ identifier[env] . identifier[iteration] - identifier[env] . identifier[begin_iteration] ]
keyword[else] :
identifier[new_param] = identifier[value] ( identifier[env] . identifier[iteration] - identifier[env] . identifier[begin_iteration] )
keyword[if] identifier[new_param] != identifier[env] . identifier[params] . identifier[get] ( identifier[key] , keyword[None] ):
identifier[new_parameters] [ identifier[key] ]= identifier[new_param]
keyword[if] identifier[new_parameters] :
identifier[env] . identifier[model] . identifier[reset_parameter] ( identifier[new_parameters] )
identifier[env] . identifier[params] . identifier[update] ( identifier[new_parameters] )
identifier[_callback] . identifier[before_iteration] = keyword[True]
identifier[_callback] . identifier[order] = literal[int]
keyword[return] identifier[_callback] | def reset_parameter(**kwargs):
"""Create a callback that resets the parameter after the first iteration.
Note
----
The initial parameter will still take in-effect on first iteration.
Parameters
----------
**kwargs : value should be list or function
List of parameters for each boosting round
or a customized function that calculates the parameter in terms of
current number of round (e.g. yields learning rate decay).
If list lst, parameter = lst[current_round].
If function func, parameter = func(current_round).
Returns
-------
callback : function
The callback that resets the parameter after the first iteration.
"""
def _callback(env):
new_parameters = {}
for (key, value) in kwargs.items():
if key in ['num_class', 'num_classes', 'boosting', 'boost', 'boosting_type', 'metric', 'metrics', 'metric_types']:
raise RuntimeError('cannot reset {} during training'.format(repr(key))) # depends on [control=['if'], data=['key']]
if isinstance(value, list):
if len(value) != env.end_iteration - env.begin_iteration:
raise ValueError("Length of list {} has to equal to 'num_boost_round'.".format(repr(key))) # depends on [control=['if'], data=[]]
new_param = value[env.iteration - env.begin_iteration] # depends on [control=['if'], data=[]]
else:
new_param = value(env.iteration - env.begin_iteration)
if new_param != env.params.get(key, None):
new_parameters[key] = new_param # depends on [control=['if'], data=['new_param']] # depends on [control=['for'], data=[]]
if new_parameters:
env.model.reset_parameter(new_parameters)
env.params.update(new_parameters) # depends on [control=['if'], data=[]]
_callback.before_iteration = True
_callback.order = 10
return _callback |
def __get_segment_types(self, element):
"""
given a <segment> or <group> element, returns its segment type and the
segment type of its parent (i.e. its dominating node)
Parameters
----------
element : ??? etree Element
Returns
-------
segment_type : str
'nucleus', 'satellite' or 'isolated' (unconnected segment, e.g. a
news headline) or 'span' (iff the segment type is currently
unknown -- i.e. ``relname`` is ``span``)
parent_segment_type : str or None
'nucleus', 'satellite' or None (e.g. for the root group node)
"""
if not 'parent' in element.attrib:
if element.tag == 'segment':
segment_type = 'isolated'
parent_segment_type = None
else: # element.tag == 'group'
segment_type = 'span'
parent_segment_type = None
return segment_type, parent_segment_type
# ``relname`` either contains the name of an RST relation or
# the string ``span`` (iff the segment is dominated by a span
# node -- a horizontal line spanning one or more segments/groups
# in an RST diagram). ``relname`` is '', if the segment is
# unconnected.
relname = element.attrib.get('relname', '')
# we look up, if ``relname`` represents a regular, binary RST
# relation or a multinucular relation. ``reltype`` is '',
# if ``relname`` is ``span`` (i.e. a span isn't an RST relation).
reltype = self.relations.get(relname, '')
if reltype == 'rst':
segment_type = 'satellite'
parent_segment_type = 'nucleus'
elif reltype == 'multinuc':
segment_type = 'nucleus'
parent_segment_type = None # we don't know it's type, yet
else: # reltype == ''
# the segment is of unknown type, it is dominated by
# a span group node
segment_type = 'span'
parent_segment_type = 'span'
return segment_type, parent_segment_type | def function[__get_segment_types, parameter[self, element]]:
constant[
given a <segment> or <group> element, returns its segment type and the
segment type of its parent (i.e. its dominating node)
Parameters
----------
element : ??? etree Element
Returns
-------
segment_type : str
'nucleus', 'satellite' or 'isolated' (unconnected segment, e.g. a
news headline) or 'span' (iff the segment type is currently
unknown -- i.e. ``relname`` is ``span``)
parent_segment_type : str or None
'nucleus', 'satellite' or None (e.g. for the root group node)
]
if <ast.UnaryOp object at 0x7da1b26acc40> begin[:]
if compare[name[element].tag equal[==] constant[segment]] begin[:]
variable[segment_type] assign[=] constant[isolated]
variable[parent_segment_type] assign[=] constant[None]
return[tuple[[<ast.Name object at 0x7da1b26ae710>, <ast.Name object at 0x7da1b26ac4f0>]]]
variable[relname] assign[=] call[name[element].attrib.get, parameter[constant[relname], constant[]]]
variable[reltype] assign[=] call[name[self].relations.get, parameter[name[relname], constant[]]]
if compare[name[reltype] equal[==] constant[rst]] begin[:]
variable[segment_type] assign[=] constant[satellite]
variable[parent_segment_type] assign[=] constant[nucleus]
return[tuple[[<ast.Name object at 0x7da1b26ac610>, <ast.Name object at 0x7da1b26aee90>]]] | keyword[def] identifier[__get_segment_types] ( identifier[self] , identifier[element] ):
literal[string]
keyword[if] keyword[not] literal[string] keyword[in] identifier[element] . identifier[attrib] :
keyword[if] identifier[element] . identifier[tag] == literal[string] :
identifier[segment_type] = literal[string]
identifier[parent_segment_type] = keyword[None]
keyword[else] :
identifier[segment_type] = literal[string]
identifier[parent_segment_type] = keyword[None]
keyword[return] identifier[segment_type] , identifier[parent_segment_type]
identifier[relname] = identifier[element] . identifier[attrib] . identifier[get] ( literal[string] , literal[string] )
identifier[reltype] = identifier[self] . identifier[relations] . identifier[get] ( identifier[relname] , literal[string] )
keyword[if] identifier[reltype] == literal[string] :
identifier[segment_type] = literal[string]
identifier[parent_segment_type] = literal[string]
keyword[elif] identifier[reltype] == literal[string] :
identifier[segment_type] = literal[string]
identifier[parent_segment_type] = keyword[None]
keyword[else] :
identifier[segment_type] = literal[string]
identifier[parent_segment_type] = literal[string]
keyword[return] identifier[segment_type] , identifier[parent_segment_type] | def __get_segment_types(self, element):
"""
given a <segment> or <group> element, returns its segment type and the
segment type of its parent (i.e. its dominating node)
Parameters
----------
element : ??? etree Element
Returns
-------
segment_type : str
'nucleus', 'satellite' or 'isolated' (unconnected segment, e.g. a
news headline) or 'span' (iff the segment type is currently
unknown -- i.e. ``relname`` is ``span``)
parent_segment_type : str or None
'nucleus', 'satellite' or None (e.g. for the root group node)
"""
if not 'parent' in element.attrib:
if element.tag == 'segment':
segment_type = 'isolated'
parent_segment_type = None # depends on [control=['if'], data=[]]
else: # element.tag == 'group'
segment_type = 'span'
parent_segment_type = None
return (segment_type, parent_segment_type) # depends on [control=['if'], data=[]]
# ``relname`` either contains the name of an RST relation or
# the string ``span`` (iff the segment is dominated by a span
# node -- a horizontal line spanning one or more segments/groups
# in an RST diagram). ``relname`` is '', if the segment is
# unconnected.
relname = element.attrib.get('relname', '')
# we look up, if ``relname`` represents a regular, binary RST
# relation or a multinucular relation. ``reltype`` is '',
# if ``relname`` is ``span`` (i.e. a span isn't an RST relation).
reltype = self.relations.get(relname, '')
if reltype == 'rst':
segment_type = 'satellite'
parent_segment_type = 'nucleus' # depends on [control=['if'], data=[]]
elif reltype == 'multinuc':
segment_type = 'nucleus'
parent_segment_type = None # we don't know it's type, yet # depends on [control=['if'], data=[]]
else: # reltype == ''
# the segment is of unknown type, it is dominated by
# a span group node
segment_type = 'span'
parent_segment_type = 'span'
return (segment_type, parent_segment_type) |
def list(self):
"""
Return a list of Accounts from Toshl for the current user
"""
response = self.client._make_request('/accounts')
response = response.json()
return self.client._list_response(response) | def function[list, parameter[self]]:
constant[
Return a list of Accounts from Toshl for the current user
]
variable[response] assign[=] call[name[self].client._make_request, parameter[constant[/accounts]]]
variable[response] assign[=] call[name[response].json, parameter[]]
return[call[name[self].client._list_response, parameter[name[response]]]] | keyword[def] identifier[list] ( identifier[self] ):
literal[string]
identifier[response] = identifier[self] . identifier[client] . identifier[_make_request] ( literal[string] )
identifier[response] = identifier[response] . identifier[json] ()
keyword[return] identifier[self] . identifier[client] . identifier[_list_response] ( identifier[response] ) | def list(self):
"""
Return a list of Accounts from Toshl for the current user
"""
response = self.client._make_request('/accounts')
response = response.json()
return self.client._list_response(response) |
def to_list(i, use_keys=False):
'''
Converts items to a list.
:param i: Item to convert
* If `i` is ``None``, the result is an empty list
* If `i` is 'string', the result won't be \
``['s', 't', 'r',...]`` rather more like ``['string']``
* If `i` is a nested dictionary, the result will be a flattened list.
:param use_keys:
If i is a dictionary, use the keys instead of values
:returns:
All items in i as list
'''
from photon.util.system import shell_notify
if not i:
return []
if isinstance(i, str):
return [i]
if isinstance(i, list):
return i
if isinstance(i, dict):
res = list()
for e in i.keys() if use_keys else i.values():
res.append(to_list(e)) if isinstance(e, dict) else res.append(e)
return res
shell_notify('type for %s uncovered' % (i), state=True, more=type(i)) | def function[to_list, parameter[i, use_keys]]:
constant[
Converts items to a list.
:param i: Item to convert
* If `i` is ``None``, the result is an empty list
* If `i` is 'string', the result won't be ``['s', 't', 'r',...]`` rather more like ``['string']``
* If `i` is a nested dictionary, the result will be a flattened list.
:param use_keys:
If i is a dictionary, use the keys instead of values
:returns:
All items in i as list
]
from relative_module[photon.util.system] import module[shell_notify]
if <ast.UnaryOp object at 0x7da1b15b5510> begin[:]
return[list[[]]]
if call[name[isinstance], parameter[name[i], name[str]]] begin[:]
return[list[[<ast.Name object at 0x7da1b15b6380>]]]
if call[name[isinstance], parameter[name[i], name[list]]] begin[:]
return[name[i]]
if call[name[isinstance], parameter[name[i], name[dict]]] begin[:]
variable[res] assign[=] call[name[list], parameter[]]
for taget[name[e]] in starred[<ast.IfExp object at 0x7da1b15b56c0>] begin[:]
<ast.IfExp object at 0x7da1b15b4250>
return[name[res]]
call[name[shell_notify], parameter[binary_operation[constant[type for %s uncovered] <ast.Mod object at 0x7da2590d6920> name[i]]]] | keyword[def] identifier[to_list] ( identifier[i] , identifier[use_keys] = keyword[False] ):
literal[string]
keyword[from] identifier[photon] . identifier[util] . identifier[system] keyword[import] identifier[shell_notify]
keyword[if] keyword[not] identifier[i] :
keyword[return] []
keyword[if] identifier[isinstance] ( identifier[i] , identifier[str] ):
keyword[return] [ identifier[i] ]
keyword[if] identifier[isinstance] ( identifier[i] , identifier[list] ):
keyword[return] identifier[i]
keyword[if] identifier[isinstance] ( identifier[i] , identifier[dict] ):
identifier[res] = identifier[list] ()
keyword[for] identifier[e] keyword[in] identifier[i] . identifier[keys] () keyword[if] identifier[use_keys] keyword[else] identifier[i] . identifier[values] ():
identifier[res] . identifier[append] ( identifier[to_list] ( identifier[e] )) keyword[if] identifier[isinstance] ( identifier[e] , identifier[dict] ) keyword[else] identifier[res] . identifier[append] ( identifier[e] )
keyword[return] identifier[res]
identifier[shell_notify] ( literal[string] %( identifier[i] ), identifier[state] = keyword[True] , identifier[more] = identifier[type] ( identifier[i] )) | def to_list(i, use_keys=False):
"""
Converts items to a list.
:param i: Item to convert
* If `i` is ``None``, the result is an empty list
* If `i` is 'string', the result won't be ``['s', 't', 'r',...]`` rather more like ``['string']``
* If `i` is a nested dictionary, the result will be a flattened list.
:param use_keys:
If i is a dictionary, use the keys instead of values
:returns:
All items in i as list
"""
from photon.util.system import shell_notify
if not i:
return [] # depends on [control=['if'], data=[]]
if isinstance(i, str):
return [i] # depends on [control=['if'], data=[]]
if isinstance(i, list):
return i # depends on [control=['if'], data=[]]
if isinstance(i, dict):
res = list()
for e in i.keys() if use_keys else i.values():
res.append(to_list(e)) if isinstance(e, dict) else res.append(e) # depends on [control=['for'], data=['e']]
return res # depends on [control=['if'], data=[]]
shell_notify('type for %s uncovered' % i, state=True, more=type(i)) |
def flat_list(*alist):
"""
Flat a tuple, list, single value or list of list to flat list
e.g.
>>> flat_list(1,2,3)
[1, 2, 3]
>>> flat_list(1)
[1]
>>> flat_list([1,2,3])
[1, 2, 3]
>>> flat_list([None])
[]
"""
a = []
for x in alist:
if x is None:
continue
if isinstance(x, (tuple, list)):
a.extend([i for i in x if i is not None])
else:
a.append(x)
return a | def function[flat_list, parameter[]]:
constant[
Flat a tuple, list, single value or list of list to flat list
e.g.
>>> flat_list(1,2,3)
[1, 2, 3]
>>> flat_list(1)
[1]
>>> flat_list([1,2,3])
[1, 2, 3]
>>> flat_list([None])
[]
]
variable[a] assign[=] list[[]]
for taget[name[x]] in starred[name[alist]] begin[:]
if compare[name[x] is constant[None]] begin[:]
continue
if call[name[isinstance], parameter[name[x], tuple[[<ast.Name object at 0x7da2044c0a30>, <ast.Name object at 0x7da2044c29b0>]]]] begin[:]
call[name[a].extend, parameter[<ast.ListComp object at 0x7da2044c1e70>]]
return[name[a]] | keyword[def] identifier[flat_list] (* identifier[alist] ):
literal[string]
identifier[a] =[]
keyword[for] identifier[x] keyword[in] identifier[alist] :
keyword[if] identifier[x] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[x] ,( identifier[tuple] , identifier[list] )):
identifier[a] . identifier[extend] ([ identifier[i] keyword[for] identifier[i] keyword[in] identifier[x] keyword[if] identifier[i] keyword[is] keyword[not] keyword[None] ])
keyword[else] :
identifier[a] . identifier[append] ( identifier[x] )
keyword[return] identifier[a] | def flat_list(*alist):
"""
Flat a tuple, list, single value or list of list to flat list
e.g.
>>> flat_list(1,2,3)
[1, 2, 3]
>>> flat_list(1)
[1]
>>> flat_list([1,2,3])
[1, 2, 3]
>>> flat_list([None])
[]
"""
a = []
for x in alist:
if x is None:
continue # depends on [control=['if'], data=[]]
if isinstance(x, (tuple, list)):
a.extend([i for i in x if i is not None]) # depends on [control=['if'], data=[]]
else:
a.append(x) # depends on [control=['for'], data=['x']]
return a |
def create(cls, name_value, name_type):
'''
Returns a Name object, populated with the given value and type
'''
if isinstance(name_value, Name.NameValue):
value = name_value
elif isinstance(name_value, str):
value = cls.NameValue(name_value)
else:
name = 'Name'
msg = exceptions.ErrorStrings.BAD_EXP_RECV
member = 'name_value'
raise TypeError(msg.format('{0}.{1}'.format(name, member),
'name_value', type(Name.NameValue),
type(name_value)))
if isinstance(name_type, Name.NameType):
n_type = name_type
elif isinstance(name_type, Enum):
n_type = cls.NameType(name_type)
else:
name = 'Name'
msg = exceptions.ErrorStrings.BAD_EXP_RECV
member = 'name_type'
raise TypeError(msg.format('{0}.{1}'.format(name, member),
'name_type', type(Name.NameType),
type(name_type)))
return Name(name_value=value,
name_type=n_type) | def function[create, parameter[cls, name_value, name_type]]:
constant[
Returns a Name object, populated with the given value and type
]
if call[name[isinstance], parameter[name[name_value], name[Name].NameValue]] begin[:]
variable[value] assign[=] name[name_value]
if call[name[isinstance], parameter[name[name_type], name[Name].NameType]] begin[:]
variable[n_type] assign[=] name[name_type]
return[call[name[Name], parameter[]]] | keyword[def] identifier[create] ( identifier[cls] , identifier[name_value] , identifier[name_type] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[name_value] , identifier[Name] . identifier[NameValue] ):
identifier[value] = identifier[name_value]
keyword[elif] identifier[isinstance] ( identifier[name_value] , identifier[str] ):
identifier[value] = identifier[cls] . identifier[NameValue] ( identifier[name_value] )
keyword[else] :
identifier[name] = literal[string]
identifier[msg] = identifier[exceptions] . identifier[ErrorStrings] . identifier[BAD_EXP_RECV]
identifier[member] = literal[string]
keyword[raise] identifier[TypeError] ( identifier[msg] . identifier[format] ( literal[string] . identifier[format] ( identifier[name] , identifier[member] ),
literal[string] , identifier[type] ( identifier[Name] . identifier[NameValue] ),
identifier[type] ( identifier[name_value] )))
keyword[if] identifier[isinstance] ( identifier[name_type] , identifier[Name] . identifier[NameType] ):
identifier[n_type] = identifier[name_type]
keyword[elif] identifier[isinstance] ( identifier[name_type] , identifier[Enum] ):
identifier[n_type] = identifier[cls] . identifier[NameType] ( identifier[name_type] )
keyword[else] :
identifier[name] = literal[string]
identifier[msg] = identifier[exceptions] . identifier[ErrorStrings] . identifier[BAD_EXP_RECV]
identifier[member] = literal[string]
keyword[raise] identifier[TypeError] ( identifier[msg] . identifier[format] ( literal[string] . identifier[format] ( identifier[name] , identifier[member] ),
literal[string] , identifier[type] ( identifier[Name] . identifier[NameType] ),
identifier[type] ( identifier[name_type] )))
keyword[return] identifier[Name] ( identifier[name_value] = identifier[value] ,
identifier[name_type] = identifier[n_type] ) | def create(cls, name_value, name_type):
"""
Returns a Name object, populated with the given value and type
"""
if isinstance(name_value, Name.NameValue):
value = name_value # depends on [control=['if'], data=[]]
elif isinstance(name_value, str):
value = cls.NameValue(name_value) # depends on [control=['if'], data=[]]
else:
name = 'Name'
msg = exceptions.ErrorStrings.BAD_EXP_RECV
member = 'name_value'
raise TypeError(msg.format('{0}.{1}'.format(name, member), 'name_value', type(Name.NameValue), type(name_value)))
if isinstance(name_type, Name.NameType):
n_type = name_type # depends on [control=['if'], data=[]]
elif isinstance(name_type, Enum):
n_type = cls.NameType(name_type) # depends on [control=['if'], data=[]]
else:
name = 'Name'
msg = exceptions.ErrorStrings.BAD_EXP_RECV
member = 'name_type'
raise TypeError(msg.format('{0}.{1}'.format(name, member), 'name_type', type(Name.NameType), type(name_type)))
return Name(name_value=value, name_type=n_type) |
def set_default(self, key, default=None):
"""T.set_default(k[,d]) -> T.get(k,d), also set T[k]=d if k not in T"""
try:
return self.get_value(key)
except KeyError:
self.insert(key, default)
return default | def function[set_default, parameter[self, key, default]]:
constant[T.set_default(k[,d]) -> T.get(k,d), also set T[k]=d if k not in T]
<ast.Try object at 0x7da18bc73490> | keyword[def] identifier[set_default] ( identifier[self] , identifier[key] , identifier[default] = keyword[None] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[get_value] ( identifier[key] )
keyword[except] identifier[KeyError] :
identifier[self] . identifier[insert] ( identifier[key] , identifier[default] )
keyword[return] identifier[default] | def set_default(self, key, default=None):
"""T.set_default(k[,d]) -> T.get(k,d), also set T[k]=d if k not in T"""
try:
return self.get_value(key) # depends on [control=['try'], data=[]]
except KeyError:
self.insert(key, default)
return default # depends on [control=['except'], data=[]] |
def _ctype_key_value(keys, vals):
"""
Returns ctype arrays for the key-value args, and the whether string keys are used.
For internal use only.
"""
if isinstance(keys, (tuple, list)):
assert(len(keys) == len(vals))
c_keys = []
c_vals = []
use_str_keys = None
for key, val in zip(keys, vals):
c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val)
c_keys += c_key_i
c_vals += c_val_i
use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \
else c_array(ctypes.c_int, c_keys)
c_vals_arr = c_array(ctypes.c_void_p, c_vals)
return (c_keys_arr, c_vals_arr, use_str_keys)
assert(isinstance(keys, (int,) + string_types)), \
"unexpected type for keys: " + str(type(keys))
use_str_keys = isinstance(keys, string_types)
if isinstance(vals, NDArray):
c_keys = c_str_array([keys]) if use_str_keys \
else c_array_buf(ctypes.c_int, array('i', [keys]))
return (c_keys, c_handle_array([vals]), use_str_keys)
else:
for value in vals:
assert(isinstance(value, NDArray))
c_keys = c_str_array([keys] * len(vals)) if use_str_keys \
else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
return (c_keys, c_handle_array(vals), use_str_keys) | def function[_ctype_key_value, parameter[keys, vals]]:
constant[
Returns ctype arrays for the key-value args, and the whether string keys are used.
For internal use only.
]
if call[name[isinstance], parameter[name[keys], tuple[[<ast.Name object at 0x7da18fe92d40>, <ast.Name object at 0x7da18fe90340>]]]] begin[:]
assert[compare[call[name[len], parameter[name[keys]]] equal[==] call[name[len], parameter[name[vals]]]]]
variable[c_keys] assign[=] list[[]]
variable[c_vals] assign[=] list[[]]
variable[use_str_keys] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da18fe92dd0>, <ast.Name object at 0x7da18fe93010>]]] in starred[call[name[zip], parameter[name[keys], name[vals]]]] begin[:]
<ast.Tuple object at 0x7da18fe910c0> assign[=] call[name[_ctype_key_value], parameter[name[key], name[val]]]
<ast.AugAssign object at 0x7da18fe90760>
<ast.AugAssign object at 0x7da18fe910f0>
variable[use_str_keys] assign[=] <ast.IfExp object at 0x7da18fe93190>
assert[compare[name[use_str_keys] equal[==] name[str_keys_i]]]
variable[c_keys_arr] assign[=] <ast.IfExp object at 0x7da18fe91870>
variable[c_vals_arr] assign[=] call[name[c_array], parameter[name[ctypes].c_void_p, name[c_vals]]]
return[tuple[[<ast.Name object at 0x7da18fe91690>, <ast.Name object at 0x7da18fe904f0>, <ast.Name object at 0x7da18fe93610>]]]
assert[call[name[isinstance], parameter[name[keys], binary_operation[tuple[[<ast.Name object at 0x7da18fe90f10>]] + name[string_types]]]]]
variable[use_str_keys] assign[=] call[name[isinstance], parameter[name[keys], name[string_types]]]
if call[name[isinstance], parameter[name[vals], name[NDArray]]] begin[:]
variable[c_keys] assign[=] <ast.IfExp object at 0x7da18fe93dc0>
return[tuple[[<ast.Name object at 0x7da18fe93af0>, <ast.Call object at 0x7da18fe93370>, <ast.Name object at 0x7da20c990f10>]]] | keyword[def] identifier[_ctype_key_value] ( identifier[keys] , identifier[vals] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[keys] ,( identifier[tuple] , identifier[list] )):
keyword[assert] ( identifier[len] ( identifier[keys] )== identifier[len] ( identifier[vals] ))
identifier[c_keys] =[]
identifier[c_vals] =[]
identifier[use_str_keys] = keyword[None]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[zip] ( identifier[keys] , identifier[vals] ):
identifier[c_key_i] , identifier[c_val_i] , identifier[str_keys_i] = identifier[_ctype_key_value] ( identifier[key] , identifier[val] )
identifier[c_keys] += identifier[c_key_i]
identifier[c_vals] += identifier[c_val_i]
identifier[use_str_keys] = identifier[str_keys_i] keyword[if] identifier[use_str_keys] keyword[is] keyword[None] keyword[else] identifier[use_str_keys]
keyword[assert] ( identifier[use_str_keys] == identifier[str_keys_i] ), literal[string]
identifier[c_keys_arr] = identifier[c_array] ( identifier[ctypes] . identifier[c_char_p] , identifier[c_keys] ) keyword[if] identifier[use_str_keys] keyword[else] identifier[c_array] ( identifier[ctypes] . identifier[c_int] , identifier[c_keys] )
identifier[c_vals_arr] = identifier[c_array] ( identifier[ctypes] . identifier[c_void_p] , identifier[c_vals] )
keyword[return] ( identifier[c_keys_arr] , identifier[c_vals_arr] , identifier[use_str_keys] )
keyword[assert] ( identifier[isinstance] ( identifier[keys] ,( identifier[int] ,)+ identifier[string_types] )), literal[string] + identifier[str] ( identifier[type] ( identifier[keys] ))
identifier[use_str_keys] = identifier[isinstance] ( identifier[keys] , identifier[string_types] )
keyword[if] identifier[isinstance] ( identifier[vals] , identifier[NDArray] ):
identifier[c_keys] = identifier[c_str_array] ([ identifier[keys] ]) keyword[if] identifier[use_str_keys] keyword[else] identifier[c_array_buf] ( identifier[ctypes] . identifier[c_int] , identifier[array] ( literal[string] ,[ identifier[keys] ]))
keyword[return] ( identifier[c_keys] , identifier[c_handle_array] ([ identifier[vals] ]), identifier[use_str_keys] )
keyword[else] :
keyword[for] identifier[value] keyword[in] identifier[vals] :
keyword[assert] ( identifier[isinstance] ( identifier[value] , identifier[NDArray] ))
identifier[c_keys] = identifier[c_str_array] ([ identifier[keys] ]* identifier[len] ( identifier[vals] )) keyword[if] identifier[use_str_keys] keyword[else] identifier[c_array_buf] ( identifier[ctypes] . identifier[c_int] , identifier[array] ( literal[string] ,[ identifier[keys] ]* identifier[len] ( identifier[vals] )))
keyword[return] ( identifier[c_keys] , identifier[c_handle_array] ( identifier[vals] ), identifier[use_str_keys] ) | def _ctype_key_value(keys, vals):
"""
Returns ctype arrays for the key-value args, and the whether string keys are used.
For internal use only.
"""
if isinstance(keys, (tuple, list)):
assert len(keys) == len(vals)
c_keys = []
c_vals = []
use_str_keys = None
for (key, val) in zip(keys, vals):
(c_key_i, c_val_i, str_keys_i) = _ctype_key_value(key, val)
c_keys += c_key_i
c_vals += c_val_i
use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
assert use_str_keys == str_keys_i, 'inconsistent types of keys detected.' # depends on [control=['for'], data=[]]
c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys else c_array(ctypes.c_int, c_keys)
c_vals_arr = c_array(ctypes.c_void_p, c_vals)
return (c_keys_arr, c_vals_arr, use_str_keys) # depends on [control=['if'], data=[]]
assert isinstance(keys, (int,) + string_types), 'unexpected type for keys: ' + str(type(keys))
use_str_keys = isinstance(keys, string_types)
if isinstance(vals, NDArray):
c_keys = c_str_array([keys]) if use_str_keys else c_array_buf(ctypes.c_int, array('i', [keys]))
return (c_keys, c_handle_array([vals]), use_str_keys) # depends on [control=['if'], data=[]]
else:
for value in vals:
assert isinstance(value, NDArray) # depends on [control=['for'], data=['value']]
c_keys = c_str_array([keys] * len(vals)) if use_str_keys else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
return (c_keys, c_handle_array(vals), use_str_keys) |
def get_auth_header(self):
"""
Getting the authorization header according to the authentication procedure
:return [dict]: Authorization header
"""
if self.api.is_authenticated:
return {"Authorization": "Bearer %s" % self.api.access_token}
return {"Authorization": "Client-ID %s" % self.api.client_id} | def function[get_auth_header, parameter[self]]:
constant[
Getting the authorization header according to the authentication procedure
:return [dict]: Authorization header
]
if name[self].api.is_authenticated begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b0627c40>], [<ast.BinOp object at 0x7da1b0624640>]]]
return[dictionary[[<ast.Constant object at 0x7da1b0626230>], [<ast.BinOp object at 0x7da1b0625db0>]]] | keyword[def] identifier[get_auth_header] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[api] . identifier[is_authenticated] :
keyword[return] { literal[string] : literal[string] % identifier[self] . identifier[api] . identifier[access_token] }
keyword[return] { literal[string] : literal[string] % identifier[self] . identifier[api] . identifier[client_id] } | def get_auth_header(self):
"""
Getting the authorization header according to the authentication procedure
:return [dict]: Authorization header
"""
if self.api.is_authenticated:
return {'Authorization': 'Bearer %s' % self.api.access_token} # depends on [control=['if'], data=[]]
return {'Authorization': 'Client-ID %s' % self.api.client_id} |
def create_image_menu_item(self, text, image_name):
"""
Function creates a menu item with an image
"""
menu_item = Gtk.ImageMenuItem(text)
img = self.create_image(image_name)
menu_item.set_image(img)
return menu_item | def function[create_image_menu_item, parameter[self, text, image_name]]:
constant[
Function creates a menu item with an image
]
variable[menu_item] assign[=] call[name[Gtk].ImageMenuItem, parameter[name[text]]]
variable[img] assign[=] call[name[self].create_image, parameter[name[image_name]]]
call[name[menu_item].set_image, parameter[name[img]]]
return[name[menu_item]] | keyword[def] identifier[create_image_menu_item] ( identifier[self] , identifier[text] , identifier[image_name] ):
literal[string]
identifier[menu_item] = identifier[Gtk] . identifier[ImageMenuItem] ( identifier[text] )
identifier[img] = identifier[self] . identifier[create_image] ( identifier[image_name] )
identifier[menu_item] . identifier[set_image] ( identifier[img] )
keyword[return] identifier[menu_item] | def create_image_menu_item(self, text, image_name):
"""
Function creates a menu item with an image
"""
menu_item = Gtk.ImageMenuItem(text)
img = self.create_image(image_name)
menu_item.set_image(img)
return menu_item |
def setnodeval(delta, graph, node, key, value):
"""Change a delta to say that a node stat was set to a certain value"""
if (
graph in delta and 'nodes' in delta[graph] and
node in delta[graph]['nodes'] and not delta[graph]['nodes'][node]
):
return
delta.setdefault(graph, {}).setdefault('node_val', {}).setdefault(node, {})[key] = value | def function[setnodeval, parameter[delta, graph, node, key, value]]:
constant[Change a delta to say that a node stat was set to a certain value]
if <ast.BoolOp object at 0x7da1b0cb7c10> begin[:]
return[None]
call[call[call[call[name[delta].setdefault, parameter[name[graph], dictionary[[], []]]].setdefault, parameter[constant[node_val], dictionary[[], []]]].setdefault, parameter[name[node], dictionary[[], []]]]][name[key]] assign[=] name[value] | keyword[def] identifier[setnodeval] ( identifier[delta] , identifier[graph] , identifier[node] , identifier[key] , identifier[value] ):
literal[string]
keyword[if] (
identifier[graph] keyword[in] identifier[delta] keyword[and] literal[string] keyword[in] identifier[delta] [ identifier[graph] ] keyword[and]
identifier[node] keyword[in] identifier[delta] [ identifier[graph] ][ literal[string] ] keyword[and] keyword[not] identifier[delta] [ identifier[graph] ][ literal[string] ][ identifier[node] ]
):
keyword[return]
identifier[delta] . identifier[setdefault] ( identifier[graph] ,{}). identifier[setdefault] ( literal[string] ,{}). identifier[setdefault] ( identifier[node] ,{})[ identifier[key] ]= identifier[value] | def setnodeval(delta, graph, node, key, value):
"""Change a delta to say that a node stat was set to a certain value"""
if graph in delta and 'nodes' in delta[graph] and (node in delta[graph]['nodes']) and (not delta[graph]['nodes'][node]):
return # depends on [control=['if'], data=[]]
delta.setdefault(graph, {}).setdefault('node_val', {}).setdefault(node, {})[key] = value |
def prior_based_segmentation(image, priors, mask, priorweight=0.25, mrf=0.1, iterations=25):
"""
Spatial prior-based image segmentation.
Markov random field regularized, prior-based image segmentation that is a
wrapper around atropos (see ANTs and related publications).
ANTsR function: `priorBasedSegmentation`
Arguments
---------
image : ANTsImage or list/tuple of ANTsImage types
input image or image list for multivariate segmentation
priors : list/tuple of ANTsImage types
list of priors that cover the number of classes
mask : ANTsImage
segment inside this mask
prior_weight : scalar
usually 0 (priors used for initialization only), 0.25 or 0.5.
mrf : scalar
regularization, higher is smoother, a numerical value in range 0.0 to 0.2
iterations : integer
maximum number of iterations. could be a large value eg 25.
Returns
-------
dictionary with the following key/value pairs:
`segmentation`: ANTsImage
actually segmented image
`probabilityimages` : list of ANTsImage types
one image for each segmentation class
Example
-------
>>> import ants
>>> fi = ants.image_read(ants.get_ants_data('r16'))
>>> seg = ants.kmeans_segmentation(fi,3)
>>> mask = ants.threshold_image(seg['segmentation'], 1, 1e15)
>>> priorseg = ants.prior_based_segmentation(fi, seg['probabilityimages'], mask, 0.25, 0.1, 3)
"""
if isinstance(image, iio.ANTsImage):
dim = image.dimension
elif isinstance(image, (tuple,list)) and (isinstance(image[0], iio.ANTsImage)):
dim = image[0].dimension
else:
raise ValueError('image argument must be ANTsImage or list/tuple of ANTsImage types')
nhood = 'x'.join(['1']*dim)
mrf = '[%s,%s]' % (str(mrf), nhood)
conv = '[%s,0]' % (str(iterations))
pseg = atropos(a=image, m=mrf, c=conv, i=priors, x=mask, priorweight=priorweight)
return pseg | def function[prior_based_segmentation, parameter[image, priors, mask, priorweight, mrf, iterations]]:
constant[
Spatial prior-based image segmentation.
Markov random field regularized, prior-based image segmentation that is a
wrapper around atropos (see ANTs and related publications).
ANTsR function: `priorBasedSegmentation`
Arguments
---------
image : ANTsImage or list/tuple of ANTsImage types
input image or image list for multivariate segmentation
priors : list/tuple of ANTsImage types
list of priors that cover the number of classes
mask : ANTsImage
segment inside this mask
prior_weight : scalar
usually 0 (priors used for initialization only), 0.25 or 0.5.
mrf : scalar
regularization, higher is smoother, a numerical value in range 0.0 to 0.2
iterations : integer
maximum number of iterations. could be a large value eg 25.
Returns
-------
dictionary with the following key/value pairs:
`segmentation`: ANTsImage
actually segmented image
`probabilityimages` : list of ANTsImage types
one image for each segmentation class
Example
-------
>>> import ants
>>> fi = ants.image_read(ants.get_ants_data('r16'))
>>> seg = ants.kmeans_segmentation(fi,3)
>>> mask = ants.threshold_image(seg['segmentation'], 1, 1e15)
>>> priorseg = ants.prior_based_segmentation(fi, seg['probabilityimages'], mask, 0.25, 0.1, 3)
]
if call[name[isinstance], parameter[name[image], name[iio].ANTsImage]] begin[:]
variable[dim] assign[=] name[image].dimension
variable[nhood] assign[=] call[constant[x].join, parameter[binary_operation[list[[<ast.Constant object at 0x7da1b1631cc0>]] * name[dim]]]]
variable[mrf] assign[=] binary_operation[constant[[%s,%s]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b16302e0>, <ast.Name object at 0x7da1b1633220>]]]
variable[conv] assign[=] binary_operation[constant[[%s,0]] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[iterations]]]]
variable[pseg] assign[=] call[name[atropos], parameter[]]
return[name[pseg]] | keyword[def] identifier[prior_based_segmentation] ( identifier[image] , identifier[priors] , identifier[mask] , identifier[priorweight] = literal[int] , identifier[mrf] = literal[int] , identifier[iterations] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[image] , identifier[iio] . identifier[ANTsImage] ):
identifier[dim] = identifier[image] . identifier[dimension]
keyword[elif] identifier[isinstance] ( identifier[image] ,( identifier[tuple] , identifier[list] )) keyword[and] ( identifier[isinstance] ( identifier[image] [ literal[int] ], identifier[iio] . identifier[ANTsImage] )):
identifier[dim] = identifier[image] [ literal[int] ]. identifier[dimension]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[nhood] = literal[string] . identifier[join] ([ literal[string] ]* identifier[dim] )
identifier[mrf] = literal[string] %( identifier[str] ( identifier[mrf] ), identifier[nhood] )
identifier[conv] = literal[string] %( identifier[str] ( identifier[iterations] ))
identifier[pseg] = identifier[atropos] ( identifier[a] = identifier[image] , identifier[m] = identifier[mrf] , identifier[c] = identifier[conv] , identifier[i] = identifier[priors] , identifier[x] = identifier[mask] , identifier[priorweight] = identifier[priorweight] )
keyword[return] identifier[pseg] | def prior_based_segmentation(image, priors, mask, priorweight=0.25, mrf=0.1, iterations=25):
"""
Spatial prior-based image segmentation.
Markov random field regularized, prior-based image segmentation that is a
wrapper around atropos (see ANTs and related publications).
ANTsR function: `priorBasedSegmentation`
Arguments
---------
image : ANTsImage or list/tuple of ANTsImage types
input image or image list for multivariate segmentation
priors : list/tuple of ANTsImage types
list of priors that cover the number of classes
mask : ANTsImage
segment inside this mask
prior_weight : scalar
usually 0 (priors used for initialization only), 0.25 or 0.5.
mrf : scalar
regularization, higher is smoother, a numerical value in range 0.0 to 0.2
iterations : integer
maximum number of iterations. could be a large value eg 25.
Returns
-------
dictionary with the following key/value pairs:
`segmentation`: ANTsImage
actually segmented image
`probabilityimages` : list of ANTsImage types
one image for each segmentation class
Example
-------
>>> import ants
>>> fi = ants.image_read(ants.get_ants_data('r16'))
>>> seg = ants.kmeans_segmentation(fi,3)
>>> mask = ants.threshold_image(seg['segmentation'], 1, 1e15)
>>> priorseg = ants.prior_based_segmentation(fi, seg['probabilityimages'], mask, 0.25, 0.1, 3)
"""
if isinstance(image, iio.ANTsImage):
dim = image.dimension # depends on [control=['if'], data=[]]
elif isinstance(image, (tuple, list)) and isinstance(image[0], iio.ANTsImage):
dim = image[0].dimension # depends on [control=['if'], data=[]]
else:
raise ValueError('image argument must be ANTsImage or list/tuple of ANTsImage types')
nhood = 'x'.join(['1'] * dim)
mrf = '[%s,%s]' % (str(mrf), nhood)
conv = '[%s,0]' % str(iterations)
pseg = atropos(a=image, m=mrf, c=conv, i=priors, x=mask, priorweight=priorweight)
return pseg |
def process_tick(self, tick_tup):
"""Increment tick counter, and call ``process_batch`` for all current
batches if tick counter exceeds ``ticks_between_batches``.
See :class:`pystorm.component.Bolt` for more information.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``.
"""
self._tick_counter += 1
# ACK tick Tuple immediately, since it's just responsible for counter
self.ack(tick_tup)
if self._tick_counter > self.ticks_between_batches and self._batches:
self.process_batches()
self._tick_counter = 0 | def function[process_tick, parameter[self, tick_tup]]:
constant[Increment tick counter, and call ``process_batch`` for all current
batches if tick counter exceeds ``ticks_between_batches``.
See :class:`pystorm.component.Bolt` for more information.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``.
]
<ast.AugAssign object at 0x7da1b2345690>
call[name[self].ack, parameter[name[tick_tup]]]
if <ast.BoolOp object at 0x7da1b2347880> begin[:]
call[name[self].process_batches, parameter[]]
name[self]._tick_counter assign[=] constant[0] | keyword[def] identifier[process_tick] ( identifier[self] , identifier[tick_tup] ):
literal[string]
identifier[self] . identifier[_tick_counter] += literal[int]
identifier[self] . identifier[ack] ( identifier[tick_tup] )
keyword[if] identifier[self] . identifier[_tick_counter] > identifier[self] . identifier[ticks_between_batches] keyword[and] identifier[self] . identifier[_batches] :
identifier[self] . identifier[process_batches] ()
identifier[self] . identifier[_tick_counter] = literal[int] | def process_tick(self, tick_tup):
"""Increment tick counter, and call ``process_batch`` for all current
batches if tick counter exceeds ``ticks_between_batches``.
See :class:`pystorm.component.Bolt` for more information.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``.
"""
self._tick_counter += 1
# ACK tick Tuple immediately, since it's just responsible for counter
self.ack(tick_tup)
if self._tick_counter > self.ticks_between_batches and self._batches:
self.process_batches()
self._tick_counter = 0 # depends on [control=['if'], data=[]] |
def get_assessments(self):
"""Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
return: (osid.assessment.AssessmentList) - a list of
``Assessments``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy) | def function[get_assessments, parameter[self]]:
constant[Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
return: (osid.assessment.AssessmentList) - a list of
``Assessments``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[assessment]]]
variable[result] assign[=] call[call[name[collection].find, parameter[call[name[self]._view_filter, parameter[]]]].sort, parameter[constant[_id], name[DESCENDING]]]
return[call[name[objects].AssessmentList, parameter[name[result]]]] | keyword[def] identifier[get_assessments] ( identifier[self] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
identifier[result] = identifier[collection] . identifier[find] ( identifier[self] . identifier[_view_filter] ()). identifier[sort] ( literal[string] , identifier[DESCENDING] )
keyword[return] identifier[objects] . identifier[AssessmentList] ( identifier[result] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] ) | def get_assessments(self):
"""Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
return: (osid.assessment.AssessmentList) - a list of
``Assessments``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment', collection='Assessment', runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy) |
def TransformerEncoder(vocab_size,
num_classes=10,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
max_len=2048,
mode='train'):
"""Transformer encoder.
Args:
vocab_size: int: vocab size
num_classes: how many classes on output
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the Transformer encoder layer.
"""
input_embedding = layers.Serial(
layers.Embedding(feature_depth, vocab_size),
layers.Dropout(rate=dropout, mode=mode),
layers.PositionalEncoding(max_len=max_len)
)
return layers.Serial(
layers.Branch(), # Branch input to create embedding and mask.
layers.Parallel(input_embedding, layers.PaddingMask()),
layers.Serial(*[EncoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, mode)
for _ in range(num_layers)]),
layers.FirstBranch(), # Drop the mask.
layers.LayerNorm(),
layers.Mean(axis=1), # Average on length.
layers.Dense(num_classes),
layers.LogSoftmax()
) | def function[TransformerEncoder, parameter[vocab_size, num_classes, feature_depth, feedforward_depth, num_layers, num_heads, dropout, max_len, mode]]:
constant[Transformer encoder.
Args:
vocab_size: int: vocab size
num_classes: how many classes on output
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the Transformer encoder layer.
]
variable[input_embedding] assign[=] call[name[layers].Serial, parameter[call[name[layers].Embedding, parameter[name[feature_depth], name[vocab_size]]], call[name[layers].Dropout, parameter[]], call[name[layers].PositionalEncoding, parameter[]]]]
return[call[name[layers].Serial, parameter[call[name[layers].Branch, parameter[]], call[name[layers].Parallel, parameter[name[input_embedding], call[name[layers].PaddingMask, parameter[]]]], call[name[layers].Serial, parameter[<ast.Starred object at 0x7da1b20586a0>]], call[name[layers].FirstBranch, parameter[]], call[name[layers].LayerNorm, parameter[]], call[name[layers].Mean, parameter[]], call[name[layers].Dense, parameter[name[num_classes]]], call[name[layers].LogSoftmax, parameter[]]]]] | keyword[def] identifier[TransformerEncoder] ( identifier[vocab_size] ,
identifier[num_classes] = literal[int] ,
identifier[feature_depth] = literal[int] ,
identifier[feedforward_depth] = literal[int] ,
identifier[num_layers] = literal[int] ,
identifier[num_heads] = literal[int] ,
identifier[dropout] = literal[int] ,
identifier[max_len] = literal[int] ,
identifier[mode] = literal[string] ):
literal[string]
identifier[input_embedding] = identifier[layers] . identifier[Serial] (
identifier[layers] . identifier[Embedding] ( identifier[feature_depth] , identifier[vocab_size] ),
identifier[layers] . identifier[Dropout] ( identifier[rate] = identifier[dropout] , identifier[mode] = identifier[mode] ),
identifier[layers] . identifier[PositionalEncoding] ( identifier[max_len] = identifier[max_len] )
)
keyword[return] identifier[layers] . identifier[Serial] (
identifier[layers] . identifier[Branch] (),
identifier[layers] . identifier[Parallel] ( identifier[input_embedding] , identifier[layers] . identifier[PaddingMask] ()),
identifier[layers] . identifier[Serial] (*[ identifier[EncoderLayer] ( identifier[feature_depth] , identifier[feedforward_depth] , identifier[num_heads] ,
identifier[dropout] , identifier[mode] )
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[num_layers] )]),
identifier[layers] . identifier[FirstBranch] (),
identifier[layers] . identifier[LayerNorm] (),
identifier[layers] . identifier[Mean] ( identifier[axis] = literal[int] ),
identifier[layers] . identifier[Dense] ( identifier[num_classes] ),
identifier[layers] . identifier[LogSoftmax] ()
) | def TransformerEncoder(vocab_size, num_classes=10, feature_depth=512, feedforward_depth=2048, num_layers=6, num_heads=8, dropout=0.1, max_len=2048, mode='train'):
"""Transformer encoder.
Args:
vocab_size: int: vocab size
num_classes: how many classes on output
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the Transformer encoder layer.
"""
input_embedding = layers.Serial(layers.Embedding(feature_depth, vocab_size), layers.Dropout(rate=dropout, mode=mode), layers.PositionalEncoding(max_len=max_len)) # Branch input to create embedding and mask.
# Drop the mask.
# Average on length.
return layers.Serial(layers.Branch(), layers.Parallel(input_embedding, layers.PaddingMask()), layers.Serial(*[EncoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode) for _ in range(num_layers)]), layers.FirstBranch(), layers.LayerNorm(), layers.Mean(axis=1), layers.Dense(num_classes), layers.LogSoftmax()) |
def extract(self, item, list_article_candidate):
"""Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image
"""
list_topimage = []
for article_candidate in list_article_candidate:
if article_candidate.topimage is not None:
# Changes a relative path of an image to the absolute path of the given url.
article_candidate.topimage = self.image_absoulte_path(item['url'], article_candidate.topimage)
list_topimage.append((article_candidate.topimage, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_topimage) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_topimage if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no topimage extracted by newspaper, return the first result of list_topimage.
return list_topimage[0][0]
else:
return list_newspaper[0][0] | def function[extract, parameter[self, item, list_article_candidate]]:
constant[Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image
]
variable[list_topimage] assign[=] list[[]]
for taget[name[article_candidate]] in starred[name[list_article_candidate]] begin[:]
if compare[name[article_candidate].topimage is_not constant[None]] begin[:]
name[article_candidate].topimage assign[=] call[name[self].image_absoulte_path, parameter[call[name[item]][constant[url]], name[article_candidate].topimage]]
call[name[list_topimage].append, parameter[tuple[[<ast.Attribute object at 0x7da20cabec50>, <ast.Attribute object at 0x7da20cabfb20>]]]]
if compare[call[name[len], parameter[name[list_topimage]]] equal[==] constant[0]] begin[:]
return[constant[None]]
variable[list_newspaper] assign[=] <ast.ListComp object at 0x7da20cabe650>
if compare[call[name[len], parameter[name[list_newspaper]]] equal[==] constant[0]] begin[:]
return[call[call[name[list_topimage]][constant[0]]][constant[0]]] | keyword[def] identifier[extract] ( identifier[self] , identifier[item] , identifier[list_article_candidate] ):
literal[string]
identifier[list_topimage] =[]
keyword[for] identifier[article_candidate] keyword[in] identifier[list_article_candidate] :
keyword[if] identifier[article_candidate] . identifier[topimage] keyword[is] keyword[not] keyword[None] :
identifier[article_candidate] . identifier[topimage] = identifier[self] . identifier[image_absoulte_path] ( identifier[item] [ literal[string] ], identifier[article_candidate] . identifier[topimage] )
identifier[list_topimage] . identifier[append] (( identifier[article_candidate] . identifier[topimage] , identifier[article_candidate] . identifier[extractor] ))
keyword[if] identifier[len] ( identifier[list_topimage] )== literal[int] :
keyword[return] keyword[None]
identifier[list_newspaper] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[list_topimage] keyword[if] identifier[x] [ literal[int] ]== literal[string] ]
keyword[if] identifier[len] ( identifier[list_newspaper] )== literal[int] :
keyword[return] identifier[list_topimage] [ literal[int] ][ literal[int] ]
keyword[else] :
keyword[return] identifier[list_newspaper] [ literal[int] ][ literal[int] ] | def extract(self, item, list_article_candidate):
"""Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image
"""
list_topimage = []
for article_candidate in list_article_candidate:
if article_candidate.topimage is not None:
# Changes a relative path of an image to the absolute path of the given url.
article_candidate.topimage = self.image_absoulte_path(item['url'], article_candidate.topimage)
list_topimage.append((article_candidate.topimage, article_candidate.extractor)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['article_candidate']]
# If there is no value in the list, return None.
if len(list_topimage) == 0:
return None # depends on [control=['if'], data=[]]
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_topimage if x[1] == 'newspaper']
if len(list_newspaper) == 0:
# If there is no topimage extracted by newspaper, return the first result of list_topimage.
return list_topimage[0][0] # depends on [control=['if'], data=[]]
else:
return list_newspaper[0][0] |
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.EWM.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimate covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols) | def function[cov, parameter[self, min_periods]]:
constant[
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.EWM.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimate covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
]
variable[numeric_df] assign[=] call[name[self]._get_numeric_data, parameter[]]
variable[cols] assign[=] name[numeric_df].columns
variable[idx] assign[=] call[name[cols].copy, parameter[]]
variable[mat] assign[=] name[numeric_df].values
if call[call[name[notna], parameter[name[mat]]].all, parameter[]] begin[:]
if <ast.BoolOp object at 0x7da20c9936a0> begin[:]
variable[baseCov] assign[=] call[name[np].empty, parameter[tuple[[<ast.Subscript object at 0x7da20c991630>, <ast.Subscript object at 0x7da20c9937f0>]]]]
call[name[baseCov].fill, parameter[name[np].nan]]
variable[baseCov] assign[=] call[name[baseCov].reshape, parameter[tuple[[<ast.Call object at 0x7da20c993b50>, <ast.Call object at 0x7da20c9903a0>]]]]
return[call[name[self]._constructor, parameter[name[baseCov]]]] | keyword[def] identifier[cov] ( identifier[self] , identifier[min_periods] = keyword[None] ):
literal[string]
identifier[numeric_df] = identifier[self] . identifier[_get_numeric_data] ()
identifier[cols] = identifier[numeric_df] . identifier[columns]
identifier[idx] = identifier[cols] . identifier[copy] ()
identifier[mat] = identifier[numeric_df] . identifier[values]
keyword[if] identifier[notna] ( identifier[mat] ). identifier[all] ():
keyword[if] identifier[min_periods] keyword[is] keyword[not] keyword[None] keyword[and] identifier[min_periods] > identifier[len] ( identifier[mat] ):
identifier[baseCov] = identifier[np] . identifier[empty] (( identifier[mat] . identifier[shape] [ literal[int] ], identifier[mat] . identifier[shape] [ literal[int] ]))
identifier[baseCov] . identifier[fill] ( identifier[np] . identifier[nan] )
keyword[else] :
identifier[baseCov] = identifier[np] . identifier[cov] ( identifier[mat] . identifier[T] )
identifier[baseCov] = identifier[baseCov] . identifier[reshape] (( identifier[len] ( identifier[cols] ), identifier[len] ( identifier[cols] )))
keyword[else] :
identifier[baseCov] = identifier[libalgos] . identifier[nancorr] ( identifier[ensure_float64] ( identifier[mat] ), identifier[cov] = keyword[True] ,
identifier[minp] = identifier[min_periods] )
keyword[return] identifier[self] . identifier[_constructor] ( identifier[baseCov] , identifier[index] = identifier[idx] , identifier[columns] = identifier[cols] ) | def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.EWM.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimate covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan) # depends on [control=['if'], data=[]]
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols))) # depends on [control=['if'], data=[]]
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols) |
def memo_Y(f):
"""
Memoized Y combinator.
.. testsetup::
from proso.func import memo_Y
.. testcode::
@memo_Y
def fib(f):
def inner_fib(n):
if n > 1:
return f(n - 1) + f(n - 2)
else:
return n
return inner_fib
print(fib(100))
.. testoutput::
354224848179261915075
"""
sub = {}
def Yf(*args):
hashable_args = tuple([repr(x) for x in args])
if args:
if hashable_args not in sub:
ret = sub[hashable_args] = f(Yf)(*args)
else:
ret = sub[hashable_args]
return ret
return f(Yf)()
return f(Yf) | def function[memo_Y, parameter[f]]:
constant[
Memoized Y combinator.
.. testsetup::
from proso.func import memo_Y
.. testcode::
@memo_Y
def fib(f):
def inner_fib(n):
if n > 1:
return f(n - 1) + f(n - 2)
else:
return n
return inner_fib
print(fib(100))
.. testoutput::
354224848179261915075
]
variable[sub] assign[=] dictionary[[], []]
def function[Yf, parameter[]]:
variable[hashable_args] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da204567100>]]
if name[args] begin[:]
if compare[name[hashable_args] <ast.NotIn object at 0x7da2590d7190> name[sub]] begin[:]
variable[ret] assign[=] call[call[name[f], parameter[name[Yf]]], parameter[<ast.Starred object at 0x7da204566860>]]
return[name[ret]]
return[call[call[name[f], parameter[name[Yf]]], parameter[]]]
return[call[name[f], parameter[name[Yf]]]] | keyword[def] identifier[memo_Y] ( identifier[f] ):
literal[string]
identifier[sub] ={}
keyword[def] identifier[Yf] (* identifier[args] ):
identifier[hashable_args] = identifier[tuple] ([ identifier[repr] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[args] ])
keyword[if] identifier[args] :
keyword[if] identifier[hashable_args] keyword[not] keyword[in] identifier[sub] :
identifier[ret] = identifier[sub] [ identifier[hashable_args] ]= identifier[f] ( identifier[Yf] )(* identifier[args] )
keyword[else] :
identifier[ret] = identifier[sub] [ identifier[hashable_args] ]
keyword[return] identifier[ret]
keyword[return] identifier[f] ( identifier[Yf] )()
keyword[return] identifier[f] ( identifier[Yf] ) | def memo_Y(f):
"""
Memoized Y combinator.
.. testsetup::
from proso.func import memo_Y
.. testcode::
@memo_Y
def fib(f):
def inner_fib(n):
if n > 1:
return f(n - 1) + f(n - 2)
else:
return n
return inner_fib
print(fib(100))
.. testoutput::
354224848179261915075
"""
sub = {}
def Yf(*args):
hashable_args = tuple([repr(x) for x in args])
if args:
if hashable_args not in sub:
ret = sub[hashable_args] = f(Yf)(*args) # depends on [control=['if'], data=['hashable_args', 'sub']]
else:
ret = sub[hashable_args]
return ret # depends on [control=['if'], data=[]]
return f(Yf)()
return f(Yf) |
def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values):
"""
creates a keyspace
:param name: name of keyspace to create
:param strategy_class: keyspace replication strategy class
:param replication_factor: keyspace replication factor
:param durable_writes: 1.2 only, write log is bypassed if set to False
:param **replication_values: 1.2 only, additional values to ad to the replication data map
"""
cluster = get_cluster()
if name not in cluster.metadata.keyspaces:
#try the 1.2 method
replication_map = {
'class': strategy_class,
'replication_factor':replication_factor
}
replication_map.update(replication_values)
if strategy_class.lower() != 'simplestrategy':
# Although the Cassandra documentation states for `replication_factor`
# that it is "Required if class is SimpleStrategy; otherwise,
# not used." we get an error if it is present.
replication_map.pop('replication_factor', None)
query = """
CREATE KEYSPACE {}
WITH REPLICATION = {}
""".format(name, json.dumps(replication_map).replace('"', "'"))
if strategy_class != 'SimpleStrategy':
query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false')
execute(query) | def function[create_keyspace, parameter[name, strategy_class, replication_factor, durable_writes]]:
constant[
creates a keyspace
:param name: name of keyspace to create
:param strategy_class: keyspace replication strategy class
:param replication_factor: keyspace replication factor
:param durable_writes: 1.2 only, write log is bypassed if set to False
:param **replication_values: 1.2 only, additional values to ad to the replication data map
]
variable[cluster] assign[=] call[name[get_cluster], parameter[]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[cluster].metadata.keyspaces] begin[:]
variable[replication_map] assign[=] dictionary[[<ast.Constant object at 0x7da18dc05720>, <ast.Constant object at 0x7da18dc04e50>], [<ast.Name object at 0x7da18dc06fe0>, <ast.Name object at 0x7da18dc04dc0>]]
call[name[replication_map].update, parameter[name[replication_values]]]
if compare[call[name[strategy_class].lower, parameter[]] not_equal[!=] constant[simplestrategy]] begin[:]
call[name[replication_map].pop, parameter[constant[replication_factor], constant[None]]]
variable[query] assign[=] call[constant[
CREATE KEYSPACE {}
WITH REPLICATION = {}
].format, parameter[name[name], call[call[name[json].dumps, parameter[name[replication_map]]].replace, parameter[constant["], constant[']]]]]
if compare[name[strategy_class] not_equal[!=] constant[SimpleStrategy]] begin[:]
<ast.AugAssign object at 0x7da18f811540>
call[name[execute], parameter[name[query]]] | keyword[def] identifier[create_keyspace] ( identifier[name] , identifier[strategy_class] , identifier[replication_factor] , identifier[durable_writes] = keyword[True] ,** identifier[replication_values] ):
literal[string]
identifier[cluster] = identifier[get_cluster] ()
keyword[if] identifier[name] keyword[not] keyword[in] identifier[cluster] . identifier[metadata] . identifier[keyspaces] :
identifier[replication_map] ={
literal[string] : identifier[strategy_class] ,
literal[string] : identifier[replication_factor]
}
identifier[replication_map] . identifier[update] ( identifier[replication_values] )
keyword[if] identifier[strategy_class] . identifier[lower] ()!= literal[string] :
identifier[replication_map] . identifier[pop] ( literal[string] , keyword[None] )
identifier[query] = literal[string] . identifier[format] ( identifier[name] , identifier[json] . identifier[dumps] ( identifier[replication_map] ). identifier[replace] ( literal[string] , literal[string] ))
keyword[if] identifier[strategy_class] != literal[string] :
identifier[query] += literal[string] . identifier[format] ( literal[string] keyword[if] identifier[durable_writes] keyword[else] literal[string] )
identifier[execute] ( identifier[query] ) | def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values):
"""
creates a keyspace
:param name: name of keyspace to create
:param strategy_class: keyspace replication strategy class
:param replication_factor: keyspace replication factor
:param durable_writes: 1.2 only, write log is bypassed if set to False
:param **replication_values: 1.2 only, additional values to ad to the replication data map
"""
cluster = get_cluster()
if name not in cluster.metadata.keyspaces:
#try the 1.2 method
replication_map = {'class': strategy_class, 'replication_factor': replication_factor}
replication_map.update(replication_values)
if strategy_class.lower() != 'simplestrategy':
# Although the Cassandra documentation states for `replication_factor`
# that it is "Required if class is SimpleStrategy; otherwise,
# not used." we get an error if it is present.
replication_map.pop('replication_factor', None) # depends on [control=['if'], data=[]]
query = '\n CREATE KEYSPACE {}\n WITH REPLICATION = {}\n '.format(name, json.dumps(replication_map).replace('"', "'"))
if strategy_class != 'SimpleStrategy':
query += ' AND DURABLE_WRITES = {}'.format('true' if durable_writes else 'false') # depends on [control=['if'], data=[]]
execute(query) # depends on [control=['if'], data=['name']] |
def do_handshake_with_ccs_injection(self): # type: ignore
"""Modified do_handshake() to send a CCS injection payload and return the result.
"""
try:
# Start the handshake using nassl - will throw WantReadError right away
self._ssl.do_handshake()
except WantReadError:
# Send the Client Hello
len_to_read = self._network_bio.pending()
while len_to_read:
# Get the data from the SSL engine
handshake_data_out = self._network_bio.read(len_to_read)
# Send it to the peer
self._sock.send(handshake_data_out)
len_to_read = self._network_bio.pending()
# Retrieve the server's response - directly read the underlying network socket
# Retrieve data until we get to the ServerHelloDone
# The server may send back a ServerHello, an Alert or a CertificateRequest first
did_receive_hello_done = False
remaining_bytes = b''
while not did_receive_hello_done:
try:
tls_record, len_consumed = TlsRecordParser.parse_bytes(remaining_bytes)
remaining_bytes = remaining_bytes[len_consumed::]
except NotEnoughData:
# Try to get more data
raw_ssl_bytes = self._sock.recv(16381)
if not raw_ssl_bytes:
# No data?
break
remaining_bytes = remaining_bytes + raw_ssl_bytes
continue
if isinstance(tls_record, TlsHandshakeRecord):
# Does the record contain a ServerDone message?
for handshake_message in tls_record.subprotocol_messages:
if handshake_message.handshake_type == TlsHandshakeTypeByte.SERVER_DONE:
did_receive_hello_done = True
break
# If not, it could be a ServerHello, Certificate or a CertificateRequest if the server requires client auth
elif isinstance(tls_record, TlsAlertRecord):
# Server returned a TLS alert
break
else:
raise ValueError('Unknown record? Type {}'.format(tls_record.header.type))
if did_receive_hello_done:
# Send an early CCS record - this should be rejected by the server
payload = TlsChangeCipherSpecRecord.from_parameters(
tls_version=TlsVersionEnum[self._ssl_version.name]).to_bytes()
self._sock.send(payload)
# Send an early application data record which should be ignored by the server
app_data_record = TlsApplicationDataRecord.from_parameters(tls_version=TlsVersionEnum[self._ssl_version.name],
application_data=b'\x00\x00')
self._sock.send(app_data_record.to_bytes())
# Check if an alert was sent back
while True:
try:
tls_record, len_consumed = TlsRecordParser.parse_bytes(remaining_bytes)
remaining_bytes = remaining_bytes[len_consumed::]
except NotEnoughData:
# Try to get more data
try:
raw_ssl_bytes = self._sock.recv(16381)
if not raw_ssl_bytes:
# No data?
raise NotVulnerableToCcsInjection()
except socket.error:
# Server closed the connection after receiving the CCS payload
raise NotVulnerableToCcsInjection()
remaining_bytes = remaining_bytes + raw_ssl_bytes
continue
if isinstance(tls_record, TlsAlertRecord):
# Server returned a TLS alert but which one?
if tls_record.alert_description == 0x14:
# BAD_RECORD_MAC: This means that the server actually tried to decrypt our early application data
# record instead of ignoring it; server is vulnerable
raise VulnerableToCcsInjection()
# Any other alert means that the server rejected the early CCS record
raise NotVulnerableToCcsInjection()
else:
break
raise NotVulnerableToCcsInjection() | def function[do_handshake_with_ccs_injection, parameter[self]]:
constant[Modified do_handshake() to send a CCS injection payload and return the result.
]
<ast.Try object at 0x7da1b18dce50>
variable[did_receive_hello_done] assign[=] constant[False]
variable[remaining_bytes] assign[=] constant[b'']
while <ast.UnaryOp object at 0x7da1b18dc3d0> begin[:]
<ast.Try object at 0x7da1b18df610>
if call[name[isinstance], parameter[name[tls_record], name[TlsHandshakeRecord]]] begin[:]
for taget[name[handshake_message]] in starred[name[tls_record].subprotocol_messages] begin[:]
if compare[name[handshake_message].handshake_type equal[==] name[TlsHandshakeTypeByte].SERVER_DONE] begin[:]
variable[did_receive_hello_done] assign[=] constant[True]
break
if name[did_receive_hello_done] begin[:]
variable[payload] assign[=] call[call[name[TlsChangeCipherSpecRecord].from_parameters, parameter[]].to_bytes, parameter[]]
call[name[self]._sock.send, parameter[name[payload]]]
variable[app_data_record] assign[=] call[name[TlsApplicationDataRecord].from_parameters, parameter[]]
call[name[self]._sock.send, parameter[call[name[app_data_record].to_bytes, parameter[]]]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b18de050>
if call[name[isinstance], parameter[name[tls_record], name[TlsAlertRecord]]] begin[:]
if compare[name[tls_record].alert_description equal[==] constant[20]] begin[:]
<ast.Raise object at 0x7da1b18df580>
<ast.Raise object at 0x7da1b18de470>
<ast.Raise object at 0x7da1b18dc670> | keyword[def] identifier[do_handshake_with_ccs_injection] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_ssl] . identifier[do_handshake] ()
keyword[except] identifier[WantReadError] :
identifier[len_to_read] = identifier[self] . identifier[_network_bio] . identifier[pending] ()
keyword[while] identifier[len_to_read] :
identifier[handshake_data_out] = identifier[self] . identifier[_network_bio] . identifier[read] ( identifier[len_to_read] )
identifier[self] . identifier[_sock] . identifier[send] ( identifier[handshake_data_out] )
identifier[len_to_read] = identifier[self] . identifier[_network_bio] . identifier[pending] ()
identifier[did_receive_hello_done] = keyword[False]
identifier[remaining_bytes] = literal[string]
keyword[while] keyword[not] identifier[did_receive_hello_done] :
keyword[try] :
identifier[tls_record] , identifier[len_consumed] = identifier[TlsRecordParser] . identifier[parse_bytes] ( identifier[remaining_bytes] )
identifier[remaining_bytes] = identifier[remaining_bytes] [ identifier[len_consumed] ::]
keyword[except] identifier[NotEnoughData] :
identifier[raw_ssl_bytes] = identifier[self] . identifier[_sock] . identifier[recv] ( literal[int] )
keyword[if] keyword[not] identifier[raw_ssl_bytes] :
keyword[break]
identifier[remaining_bytes] = identifier[remaining_bytes] + identifier[raw_ssl_bytes]
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[tls_record] , identifier[TlsHandshakeRecord] ):
keyword[for] identifier[handshake_message] keyword[in] identifier[tls_record] . identifier[subprotocol_messages] :
keyword[if] identifier[handshake_message] . identifier[handshake_type] == identifier[TlsHandshakeTypeByte] . identifier[SERVER_DONE] :
identifier[did_receive_hello_done] = keyword[True]
keyword[break]
keyword[elif] identifier[isinstance] ( identifier[tls_record] , identifier[TlsAlertRecord] ):
keyword[break]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[tls_record] . identifier[header] . identifier[type] ))
keyword[if] identifier[did_receive_hello_done] :
identifier[payload] = identifier[TlsChangeCipherSpecRecord] . identifier[from_parameters] (
identifier[tls_version] = identifier[TlsVersionEnum] [ identifier[self] . identifier[_ssl_version] . identifier[name] ]). identifier[to_bytes] ()
identifier[self] . identifier[_sock] . identifier[send] ( identifier[payload] )
identifier[app_data_record] = identifier[TlsApplicationDataRecord] . identifier[from_parameters] ( identifier[tls_version] = identifier[TlsVersionEnum] [ identifier[self] . identifier[_ssl_version] . identifier[name] ],
identifier[application_data] = literal[string] )
identifier[self] . identifier[_sock] . identifier[send] ( identifier[app_data_record] . identifier[to_bytes] ())
keyword[while] keyword[True] :
keyword[try] :
identifier[tls_record] , identifier[len_consumed] = identifier[TlsRecordParser] . identifier[parse_bytes] ( identifier[remaining_bytes] )
identifier[remaining_bytes] = identifier[remaining_bytes] [ identifier[len_consumed] ::]
keyword[except] identifier[NotEnoughData] :
keyword[try] :
identifier[raw_ssl_bytes] = identifier[self] . identifier[_sock] . identifier[recv] ( literal[int] )
keyword[if] keyword[not] identifier[raw_ssl_bytes] :
keyword[raise] identifier[NotVulnerableToCcsInjection] ()
keyword[except] identifier[socket] . identifier[error] :
keyword[raise] identifier[NotVulnerableToCcsInjection] ()
identifier[remaining_bytes] = identifier[remaining_bytes] + identifier[raw_ssl_bytes]
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[tls_record] , identifier[TlsAlertRecord] ):
keyword[if] identifier[tls_record] . identifier[alert_description] == literal[int] :
keyword[raise] identifier[VulnerableToCcsInjection] ()
keyword[raise] identifier[NotVulnerableToCcsInjection] ()
keyword[else] :
keyword[break]
keyword[raise] identifier[NotVulnerableToCcsInjection] () | def do_handshake_with_ccs_injection(self): # type: ignore
'Modified do_handshake() to send a CCS injection payload and return the result.\n '
try:
# Start the handshake using nassl - will throw WantReadError right away
self._ssl.do_handshake() # depends on [control=['try'], data=[]]
except WantReadError:
# Send the Client Hello
len_to_read = self._network_bio.pending()
while len_to_read:
# Get the data from the SSL engine
handshake_data_out = self._network_bio.read(len_to_read)
# Send it to the peer
self._sock.send(handshake_data_out)
len_to_read = self._network_bio.pending() # depends on [control=['while'], data=[]] # depends on [control=['except'], data=[]]
# Retrieve the server's response - directly read the underlying network socket
# Retrieve data until we get to the ServerHelloDone
# The server may send back a ServerHello, an Alert or a CertificateRequest first
did_receive_hello_done = False
remaining_bytes = b''
while not did_receive_hello_done:
try:
(tls_record, len_consumed) = TlsRecordParser.parse_bytes(remaining_bytes)
remaining_bytes = remaining_bytes[len_consumed:] # depends on [control=['try'], data=[]]
except NotEnoughData:
# Try to get more data
raw_ssl_bytes = self._sock.recv(16381)
if not raw_ssl_bytes:
# No data?
break # depends on [control=['if'], data=[]]
remaining_bytes = remaining_bytes + raw_ssl_bytes
continue # depends on [control=['except'], data=[]]
if isinstance(tls_record, TlsHandshakeRecord):
# Does the record contain a ServerDone message?
for handshake_message in tls_record.subprotocol_messages:
if handshake_message.handshake_type == TlsHandshakeTypeByte.SERVER_DONE:
did_receive_hello_done = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['handshake_message']] # depends on [control=['if'], data=[]]
# If not, it could be a ServerHello, Certificate or a CertificateRequest if the server requires client auth
elif isinstance(tls_record, TlsAlertRecord):
# Server returned a TLS alert
break # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown record? Type {}'.format(tls_record.header.type)) # depends on [control=['while'], data=[]]
if did_receive_hello_done:
# Send an early CCS record - this should be rejected by the server
payload = TlsChangeCipherSpecRecord.from_parameters(tls_version=TlsVersionEnum[self._ssl_version.name]).to_bytes()
self._sock.send(payload)
# Send an early application data record which should be ignored by the server
app_data_record = TlsApplicationDataRecord.from_parameters(tls_version=TlsVersionEnum[self._ssl_version.name], application_data=b'\x00\x00')
self._sock.send(app_data_record.to_bytes())
# Check if an alert was sent back
while True:
try:
(tls_record, len_consumed) = TlsRecordParser.parse_bytes(remaining_bytes)
remaining_bytes = remaining_bytes[len_consumed:] # depends on [control=['try'], data=[]]
except NotEnoughData:
# Try to get more data
try:
raw_ssl_bytes = self._sock.recv(16381)
if not raw_ssl_bytes:
# No data?
raise NotVulnerableToCcsInjection() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except socket.error:
# Server closed the connection after receiving the CCS payload
raise NotVulnerableToCcsInjection() # depends on [control=['except'], data=[]]
remaining_bytes = remaining_bytes + raw_ssl_bytes
continue # depends on [control=['except'], data=[]]
if isinstance(tls_record, TlsAlertRecord):
# Server returned a TLS alert but which one?
if tls_record.alert_description == 20:
# BAD_RECORD_MAC: This means that the server actually tried to decrypt our early application data
# record instead of ignoring it; server is vulnerable
raise VulnerableToCcsInjection() # depends on [control=['if'], data=[]]
# Any other alert means that the server rejected the early CCS record
raise NotVulnerableToCcsInjection() # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
raise NotVulnerableToCcsInjection() # depends on [control=['if'], data=[]] |
def _request(self, method, url, headers=None, **kwargs):
"""
Normally the connection guarantees response times of 3 seconds on average,
if there is an abnormal situation, the maximum response time is 1 minute.
It is highly recommended that you set “timeouts” when you connect with PayU.
Args:
method:
url:
headers:
**kwargs:
Returns:
"""
_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
if headers:
_headers.update(headers)
if self.is_debug:
self.logger.debug('{} {} {} {}'.format(method, url, headers, kwargs))
return self._parse(requests.request(method, url, headers=_headers, timeout=60, **kwargs)) | def function[_request, parameter[self, method, url, headers]]:
constant[
Normally the connection guarantees response times of 3 seconds on average,
if there is an abnormal situation, the maximum response time is 1 minute.
It is highly recommended that you set “timeouts” when you connect with PayU.
Args:
method:
url:
headers:
**kwargs:
Returns:
]
variable[_headers] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c75b0>, <ast.Constant object at 0x7da20c6c41f0>], [<ast.Constant object at 0x7da20c6c7ac0>, <ast.Constant object at 0x7da20c6c6ce0>]]
if name[headers] begin[:]
call[name[_headers].update, parameter[name[headers]]]
if name[self].is_debug begin[:]
call[name[self].logger.debug, parameter[call[constant[{} {} {} {}].format, parameter[name[method], name[url], name[headers], name[kwargs]]]]]
return[call[name[self]._parse, parameter[call[name[requests].request, parameter[name[method], name[url]]]]]] | keyword[def] identifier[_request] ( identifier[self] , identifier[method] , identifier[url] , identifier[headers] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[_headers] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[if] identifier[headers] :
identifier[_headers] . identifier[update] ( identifier[headers] )
keyword[if] identifier[self] . identifier[is_debug] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[method] , identifier[url] , identifier[headers] , identifier[kwargs] ))
keyword[return] identifier[self] . identifier[_parse] ( identifier[requests] . identifier[request] ( identifier[method] , identifier[url] , identifier[headers] = identifier[_headers] , identifier[timeout] = literal[int] ,** identifier[kwargs] )) | def _request(self, method, url, headers=None, **kwargs):
"""
Normally the connection guarantees response times of 3 seconds on average,
if there is an abnormal situation, the maximum response time is 1 minute.
It is highly recommended that you set “timeouts” when you connect with PayU.
Args:
method:
url:
headers:
**kwargs:
Returns:
"""
_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
if headers:
_headers.update(headers) # depends on [control=['if'], data=[]]
if self.is_debug:
self.logger.debug('{} {} {} {}'.format(method, url, headers, kwargs)) # depends on [control=['if'], data=[]]
return self._parse(requests.request(method, url, headers=_headers, timeout=60, **kwargs)) |
def transform_record(self, pid, record, links_factory=None):
"""Transform record into an intermediate representation.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:param links_factory: The link factory. (Default: ``None``)
:returns: The intermediate representation for the record.
"""
return self.dump(self.preprocess_record(pid, record,
links_factory=links_factory)) | def function[transform_record, parameter[self, pid, record, links_factory]]:
constant[Transform record into an intermediate representation.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:param links_factory: The link factory. (Default: ``None``)
:returns: The intermediate representation for the record.
]
return[call[name[self].dump, parameter[call[name[self].preprocess_record, parameter[name[pid], name[record]]]]]] | keyword[def] identifier[transform_record] ( identifier[self] , identifier[pid] , identifier[record] , identifier[links_factory] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[dump] ( identifier[self] . identifier[preprocess_record] ( identifier[pid] , identifier[record] ,
identifier[links_factory] = identifier[links_factory] )) | def transform_record(self, pid, record, links_factory=None):
"""Transform record into an intermediate representation.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:param links_factory: The link factory. (Default: ``None``)
:returns: The intermediate representation for the record.
"""
return self.dump(self.preprocess_record(pid, record, links_factory=links_factory)) |
def get_name(self):
"""
@rtype: str
@return: Module name, as used in labels.
@warning: Names are B{NOT} guaranteed to be unique.
If you need unique identification for a loaded module,
use the base address instead.
@see: L{get_label}
"""
pathname = self.get_filename()
if pathname:
modName = self.__filename_to_modname(pathname)
if isinstance(modName, compat.unicode):
try:
modName = modName.encode('cp1252')
except UnicodeEncodeError:
e = sys.exc_info()[1]
warnings.warn(str(e))
else:
modName = "0x%x" % self.get_base()
return modName | def function[get_name, parameter[self]]:
constant[
@rtype: str
@return: Module name, as used in labels.
@warning: Names are B{NOT} guaranteed to be unique.
If you need unique identification for a loaded module,
use the base address instead.
@see: L{get_label}
]
variable[pathname] assign[=] call[name[self].get_filename, parameter[]]
if name[pathname] begin[:]
variable[modName] assign[=] call[name[self].__filename_to_modname, parameter[name[pathname]]]
if call[name[isinstance], parameter[name[modName], name[compat].unicode]] begin[:]
<ast.Try object at 0x7da20c6c6da0>
return[name[modName]] | keyword[def] identifier[get_name] ( identifier[self] ):
literal[string]
identifier[pathname] = identifier[self] . identifier[get_filename] ()
keyword[if] identifier[pathname] :
identifier[modName] = identifier[self] . identifier[__filename_to_modname] ( identifier[pathname] )
keyword[if] identifier[isinstance] ( identifier[modName] , identifier[compat] . identifier[unicode] ):
keyword[try] :
identifier[modName] = identifier[modName] . identifier[encode] ( literal[string] )
keyword[except] identifier[UnicodeEncodeError] :
identifier[e] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
identifier[warnings] . identifier[warn] ( identifier[str] ( identifier[e] ))
keyword[else] :
identifier[modName] = literal[string] % identifier[self] . identifier[get_base] ()
keyword[return] identifier[modName] | def get_name(self):
"""
@rtype: str
@return: Module name, as used in labels.
@warning: Names are B{NOT} guaranteed to be unique.
If you need unique identification for a loaded module,
use the base address instead.
@see: L{get_label}
"""
pathname = self.get_filename()
if pathname:
modName = self.__filename_to_modname(pathname)
if isinstance(modName, compat.unicode):
try:
modName = modName.encode('cp1252') # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
e = sys.exc_info()[1]
warnings.warn(str(e)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
modName = '0x%x' % self.get_base()
return modName |
def _match_tags(repex_tags, path_tags):
"""Check for matching tags between what the user provided
and the tags set in the config.
If `any` is chosen, match.
If no tags are chosen and none are configured, match.
If the user provided tags match any of the configured tags, match.
"""
if 'any' in repex_tags or (not repex_tags and not path_tags):
return True
elif set(repex_tags) & set(path_tags):
return True
return False | def function[_match_tags, parameter[repex_tags, path_tags]]:
constant[Check for matching tags between what the user provided
and the tags set in the config.
If `any` is chosen, match.
If no tags are chosen and none are configured, match.
If the user provided tags match any of the configured tags, match.
]
if <ast.BoolOp object at 0x7da1b10c0550> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[_match_tags] ( identifier[repex_tags] , identifier[path_tags] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[repex_tags] keyword[or] ( keyword[not] identifier[repex_tags] keyword[and] keyword[not] identifier[path_tags] ):
keyword[return] keyword[True]
keyword[elif] identifier[set] ( identifier[repex_tags] )& identifier[set] ( identifier[path_tags] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def _match_tags(repex_tags, path_tags):
"""Check for matching tags between what the user provided
and the tags set in the config.
If `any` is chosen, match.
If no tags are chosen and none are configured, match.
If the user provided tags match any of the configured tags, match.
"""
if 'any' in repex_tags or (not repex_tags and (not path_tags)):
return True # depends on [control=['if'], data=[]]
elif set(repex_tags) & set(path_tags):
return True # depends on [control=['if'], data=[]]
return False |
def save_config(self, cmd="write", confirm=False, confirm_response=""):
"""Saves Config Using write command"""
return super(IpInfusionOcNOSBase, self).save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
) | def function[save_config, parameter[self, cmd, confirm, confirm_response]]:
constant[Saves Config Using write command]
return[call[call[name[super], parameter[name[IpInfusionOcNOSBase], name[self]]].save_config, parameter[]]] | keyword[def] identifier[save_config] ( identifier[self] , identifier[cmd] = literal[string] , identifier[confirm] = keyword[False] , identifier[confirm_response] = literal[string] ):
literal[string]
keyword[return] identifier[super] ( identifier[IpInfusionOcNOSBase] , identifier[self] ). identifier[save_config] (
identifier[cmd] = identifier[cmd] , identifier[confirm] = identifier[confirm] , identifier[confirm_response] = identifier[confirm_response]
) | def save_config(self, cmd='write', confirm=False, confirm_response=''):
"""Saves Config Using write command"""
return super(IpInfusionOcNOSBase, self).save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response) |
def aggregate_grads(all_grads,
colocation=False,
devices=None,
average=True):
"""
Average the gradients.
Args:
all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.
The variables have to be the same across the K lists.
colocation (bool): colocate gradient averaging on the device of the variable.
devices (list[str]): assign the averaging to these device in
round-robin. Cannot be used together with ``colocation``.
average (bool): do average or sum
Returns:
(N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K.
"""
assert not (devices is not None and colocation)
if devices is not None:
assert isinstance(devices, list), devices
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads[0]
def aggregate(grads):
if average:
return tf.multiply(tf.add_n(grads), 1.0 / nr_tower)
else:
return tf.add_n(grads)
ret = []
for idx, grad_and_vars in enumerate(zip(*all_grads)):
# Ngpu * 2
v = grad_and_vars[0][1]
grads = [g for (g, _) in grad_and_vars]
if colocation:
with tf.device(v.device): # colocate summed grad with var
grad = aggregate(grads)
elif devices is None:
grad = aggregate(grads)
else:
dev = devices[idx % len(devices)]
with tf.device(dev):
grad = aggregate(grads)
ret.append((grad, v))
return ret | def function[aggregate_grads, parameter[all_grads, colocation, devices, average]]:
constant[
Average the gradients.
Args:
all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.
The variables have to be the same across the K lists.
colocation (bool): colocate gradient averaging on the device of the variable.
devices (list[str]): assign the averaging to these device in
round-robin. Cannot be used together with ``colocation``.
average (bool): do average or sum
Returns:
(N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K.
]
assert[<ast.UnaryOp object at 0x7da18f58c370>]
if compare[name[devices] is_not constant[None]] begin[:]
assert[call[name[isinstance], parameter[name[devices], name[list]]]]
variable[nr_tower] assign[=] call[name[len], parameter[name[all_grads]]]
if compare[name[nr_tower] equal[==] constant[1]] begin[:]
return[call[name[all_grads]][constant[0]]]
def function[aggregate, parameter[grads]]:
if name[average] begin[:]
return[call[name[tf].multiply, parameter[call[name[tf].add_n, parameter[name[grads]]], binary_operation[constant[1.0] / name[nr_tower]]]]]
variable[ret] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18bcca350>, <ast.Name object at 0x7da18bcc8970>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[<ast.Starred object at 0x7da18bccb8e0>]]]]] begin[:]
variable[v] assign[=] call[call[name[grad_and_vars]][constant[0]]][constant[1]]
variable[grads] assign[=] <ast.ListComp object at 0x7da18bcca860>
if name[colocation] begin[:]
with call[name[tf].device, parameter[name[v].device]] begin[:]
variable[grad] assign[=] call[name[aggregate], parameter[name[grads]]]
call[name[ret].append, parameter[tuple[[<ast.Name object at 0x7da18bcca920>, <ast.Name object at 0x7da18bcc8b20>]]]]
return[name[ret]] | keyword[def] identifier[aggregate_grads] ( identifier[all_grads] ,
identifier[colocation] = keyword[False] ,
identifier[devices] = keyword[None] ,
identifier[average] = keyword[True] ):
literal[string]
keyword[assert] keyword[not] ( identifier[devices] keyword[is] keyword[not] keyword[None] keyword[and] identifier[colocation] )
keyword[if] identifier[devices] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[isinstance] ( identifier[devices] , identifier[list] ), identifier[devices]
identifier[nr_tower] = identifier[len] ( identifier[all_grads] )
keyword[if] identifier[nr_tower] == literal[int] :
keyword[return] identifier[all_grads] [ literal[int] ]
keyword[def] identifier[aggregate] ( identifier[grads] ):
keyword[if] identifier[average] :
keyword[return] identifier[tf] . identifier[multiply] ( identifier[tf] . identifier[add_n] ( identifier[grads] ), literal[int] / identifier[nr_tower] )
keyword[else] :
keyword[return] identifier[tf] . identifier[add_n] ( identifier[grads] )
identifier[ret] =[]
keyword[for] identifier[idx] , identifier[grad_and_vars] keyword[in] identifier[enumerate] ( identifier[zip] (* identifier[all_grads] )):
identifier[v] = identifier[grad_and_vars] [ literal[int] ][ literal[int] ]
identifier[grads] =[ identifier[g] keyword[for] ( identifier[g] , identifier[_] ) keyword[in] identifier[grad_and_vars] ]
keyword[if] identifier[colocation] :
keyword[with] identifier[tf] . identifier[device] ( identifier[v] . identifier[device] ):
identifier[grad] = identifier[aggregate] ( identifier[grads] )
keyword[elif] identifier[devices] keyword[is] keyword[None] :
identifier[grad] = identifier[aggregate] ( identifier[grads] )
keyword[else] :
identifier[dev] = identifier[devices] [ identifier[idx] % identifier[len] ( identifier[devices] )]
keyword[with] identifier[tf] . identifier[device] ( identifier[dev] ):
identifier[grad] = identifier[aggregate] ( identifier[grads] )
identifier[ret] . identifier[append] (( identifier[grad] , identifier[v] ))
keyword[return] identifier[ret] | def aggregate_grads(all_grads, colocation=False, devices=None, average=True):
"""
Average the gradients.
Args:
all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.
The variables have to be the same across the K lists.
colocation (bool): colocate gradient averaging on the device of the variable.
devices (list[str]): assign the averaging to these device in
round-robin. Cannot be used together with ``colocation``.
average (bool): do average or sum
Returns:
(N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K.
"""
assert not (devices is not None and colocation)
if devices is not None:
assert isinstance(devices, list), devices # depends on [control=['if'], data=['devices']]
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads[0] # depends on [control=['if'], data=[]]
def aggregate(grads):
if average:
return tf.multiply(tf.add_n(grads), 1.0 / nr_tower) # depends on [control=['if'], data=[]]
else:
return tf.add_n(grads)
ret = []
for (idx, grad_and_vars) in enumerate(zip(*all_grads)):
# Ngpu * 2
v = grad_and_vars[0][1]
grads = [g for (g, _) in grad_and_vars]
if colocation:
with tf.device(v.device): # colocate summed grad with var
grad = aggregate(grads) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
elif devices is None:
grad = aggregate(grads) # depends on [control=['if'], data=[]]
else:
dev = devices[idx % len(devices)]
with tf.device(dev):
grad = aggregate(grads) # depends on [control=['with'], data=[]]
ret.append((grad, v)) # depends on [control=['for'], data=[]]
return ret |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.