code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def make_skiplist(*args, use_fallback=False):
'''Create a new skiplist'''
sl = fallback.Skiplist if use_fallback else Skiplist
return sl(*args) | def function[make_skiplist, parameter[]]:
constant[Create a new skiplist]
variable[sl] assign[=] <ast.IfExp object at 0x7da18ede7bb0>
return[call[name[sl], parameter[<ast.Starred object at 0x7da18ede7b50>]]] | keyword[def] identifier[make_skiplist] (* identifier[args] , identifier[use_fallback] = keyword[False] ):
literal[string]
identifier[sl] = identifier[fallback] . identifier[Skiplist] keyword[if] identifier[use_fallback] keyword[else] identifier[Skiplist]
keyword[return] identifier[sl] (* identifier[args] ) | def make_skiplist(*args, use_fallback=False):
"""Create a new skiplist"""
sl = fallback.Skiplist if use_fallback else Skiplist
return sl(*args) |
def force_option_value(self, opt_name, value):
""" force the (default) value of an option.
The option is then no more listed by :func:`get_options()`.
:param opt_name: option name
:type opt_name: str
:param value: option value
"""
if not self.has_option(opt_name):
raise ValueError("Unknow option name (%s)" % opt_name)
self._options[opt_name].default = value # also change the value
self._options[opt_name].hidden = True | def function[force_option_value, parameter[self, opt_name, value]]:
constant[ force the (default) value of an option.
The option is then no more listed by :func:`get_options()`.
:param opt_name: option name
:type opt_name: str
:param value: option value
]
if <ast.UnaryOp object at 0x7da18f00dcf0> begin[:]
<ast.Raise object at 0x7da18f00d600>
call[name[self]._options][name[opt_name]].default assign[=] name[value]
call[name[self]._options][name[opt_name]].hidden assign[=] constant[True] | keyword[def] identifier[force_option_value] ( identifier[self] , identifier[opt_name] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_option] ( identifier[opt_name] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[opt_name] )
identifier[self] . identifier[_options] [ identifier[opt_name] ]. identifier[default] = identifier[value]
identifier[self] . identifier[_options] [ identifier[opt_name] ]. identifier[hidden] = keyword[True] | def force_option_value(self, opt_name, value):
""" force the (default) value of an option.
The option is then no more listed by :func:`get_options()`.
:param opt_name: option name
:type opt_name: str
:param value: option value
"""
if not self.has_option(opt_name):
raise ValueError('Unknow option name (%s)' % opt_name) # depends on [control=['if'], data=[]]
self._options[opt_name].default = value # also change the value
self._options[opt_name].hidden = True |
def is_subdomain_zonefile_hash(self, fqn, zonefile_hash, cur=None):
"""
Does this zone file hash belong to this subdomain?
"""
sql = 'SELECT COUNT(zonefile_hash) FROM {} WHERE fully_qualified_subdomain = ? and zonefile_hash = ?;'.format(self.subdomain_table)
args = (fqn,zonefile_hash)
cursor = None
if cur is None:
cursor = self.conn.cursor()
else:
cursor = cur
rows = db_query_execute(cursor, sql, args)
count = None
for row in rows:
count = row['COUNT(zonefile_hash)']
break
return (count > 0) | def function[is_subdomain_zonefile_hash, parameter[self, fqn, zonefile_hash, cur]]:
constant[
Does this zone file hash belong to this subdomain?
]
variable[sql] assign[=] call[constant[SELECT COUNT(zonefile_hash) FROM {} WHERE fully_qualified_subdomain = ? and zonefile_hash = ?;].format, parameter[name[self].subdomain_table]]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da1b180d930>, <ast.Name object at 0x7da1b180f640>]]
variable[cursor] assign[=] constant[None]
if compare[name[cur] is constant[None]] begin[:]
variable[cursor] assign[=] call[name[self].conn.cursor, parameter[]]
variable[rows] assign[=] call[name[db_query_execute], parameter[name[cursor], name[sql], name[args]]]
variable[count] assign[=] constant[None]
for taget[name[row]] in starred[name[rows]] begin[:]
variable[count] assign[=] call[name[row]][constant[COUNT(zonefile_hash)]]
break
return[compare[name[count] greater[>] constant[0]]] | keyword[def] identifier[is_subdomain_zonefile_hash] ( identifier[self] , identifier[fqn] , identifier[zonefile_hash] , identifier[cur] = keyword[None] ):
literal[string]
identifier[sql] = literal[string] . identifier[format] ( identifier[self] . identifier[subdomain_table] )
identifier[args] =( identifier[fqn] , identifier[zonefile_hash] )
identifier[cursor] = keyword[None]
keyword[if] identifier[cur] keyword[is] keyword[None] :
identifier[cursor] = identifier[self] . identifier[conn] . identifier[cursor] ()
keyword[else] :
identifier[cursor] = identifier[cur]
identifier[rows] = identifier[db_query_execute] ( identifier[cursor] , identifier[sql] , identifier[args] )
identifier[count] = keyword[None]
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[count] = identifier[row] [ literal[string] ]
keyword[break]
keyword[return] ( identifier[count] > literal[int] ) | def is_subdomain_zonefile_hash(self, fqn, zonefile_hash, cur=None):
"""
Does this zone file hash belong to this subdomain?
"""
sql = 'SELECT COUNT(zonefile_hash) FROM {} WHERE fully_qualified_subdomain = ? and zonefile_hash = ?;'.format(self.subdomain_table)
args = (fqn, zonefile_hash)
cursor = None
if cur is None:
cursor = self.conn.cursor() # depends on [control=['if'], data=[]]
else:
cursor = cur
rows = db_query_execute(cursor, sql, args)
count = None
for row in rows:
count = row['COUNT(zonefile_hash)']
break # depends on [control=['for'], data=['row']]
return count > 0 |
def capakey_rest_gateway_request(url, headers={}, params={}):
'''
Utility function that helps making requests to the CAPAKEY REST service.
:param string url: URL to request.
:param dict headers: Headers to send with the URL.
:param dict params: Parameters to send with the URL.
:returns: Result of the call.
'''
try:
res = requests.get(url, headers=headers, params=params)
res.raise_for_status()
return res
except requests.ConnectionError as ce:
raise GatewayRuntimeException(
'Could not execute request due to connection problems:\n%s' % repr(ce),
ce
)
except requests.HTTPError as he:
raise GatewayResourceNotFoundException()
except requests.RequestException as re:
raise GatewayRuntimeException(
'Could not execute request due to:\n%s' % repr(re),
re
) | def function[capakey_rest_gateway_request, parameter[url, headers, params]]:
constant[
Utility function that helps making requests to the CAPAKEY REST service.
:param string url: URL to request.
:param dict headers: Headers to send with the URL.
:param dict params: Parameters to send with the URL.
:returns: Result of the call.
]
<ast.Try object at 0x7da1b0a04fa0> | keyword[def] identifier[capakey_rest_gateway_request] ( identifier[url] , identifier[headers] ={}, identifier[params] ={}):
literal[string]
keyword[try] :
identifier[res] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[params] = identifier[params] )
identifier[res] . identifier[raise_for_status] ()
keyword[return] identifier[res]
keyword[except] identifier[requests] . identifier[ConnectionError] keyword[as] identifier[ce] :
keyword[raise] identifier[GatewayRuntimeException] (
literal[string] % identifier[repr] ( identifier[ce] ),
identifier[ce]
)
keyword[except] identifier[requests] . identifier[HTTPError] keyword[as] identifier[he] :
keyword[raise] identifier[GatewayResourceNotFoundException] ()
keyword[except] identifier[requests] . identifier[RequestException] keyword[as] identifier[re] :
keyword[raise] identifier[GatewayRuntimeException] (
literal[string] % identifier[repr] ( identifier[re] ),
identifier[re]
) | def capakey_rest_gateway_request(url, headers={}, params={}):
"""
Utility function that helps making requests to the CAPAKEY REST service.
:param string url: URL to request.
:param dict headers: Headers to send with the URL.
:param dict params: Parameters to send with the URL.
:returns: Result of the call.
"""
try:
res = requests.get(url, headers=headers, params=params)
res.raise_for_status()
return res # depends on [control=['try'], data=[]]
except requests.ConnectionError as ce:
raise GatewayRuntimeException('Could not execute request due to connection problems:\n%s' % repr(ce), ce) # depends on [control=['except'], data=['ce']]
except requests.HTTPError as he:
raise GatewayResourceNotFoundException() # depends on [control=['except'], data=[]]
except requests.RequestException as re:
raise GatewayRuntimeException('Could not execute request due to:\n%s' % repr(re), re) # depends on [control=['except'], data=['re']] |
def whitelist_licenses_policy(policy_name: str, allowed_licenses: set):
"""A policy factory for making license-based whitelist policies.
To apply in project, include the function returned from this factory
in the ilst returned by the `get_policies` function implemented in the
project `YSettings` file.
The factory returns a policy function named
`whitelist_{policy_name}_licenses` that applies to targets with
`policy_name` in their policies list.
The returned policy asserts that all licenses contained in the target
(including through explicit & implicit dependencies) are in the whitelist
defined by `allowed_licenses`.
See example in tests/errors.
"""
def policy_func(build_context, target):
"""whitelist_{policy_name}_licenses policy function.
Return error message (string) if policy for `target` is violated,
otherwise return `None`.
"""
if policy_name in target.props.policies:
licenses = set(target.props.license)
for dep in build_context.generate_all_deps(target):
licenses.update(dep.props.license)
licenses.difference_update(allowed_licenses)
if licenses:
return 'Invalid licenses for {} policy: {}'.format(
policy_name, ', '.join(sorted(licenses)))
return None
policy_func.__name__ = 'whitelist_{}_licenses'.format(policy_name)
return policy_func | def function[whitelist_licenses_policy, parameter[policy_name, allowed_licenses]]:
constant[A policy factory for making license-based whitelist policies.
To apply in project, include the function returned from this factory
in the ilst returned by the `get_policies` function implemented in the
project `YSettings` file.
The factory returns a policy function named
`whitelist_{policy_name}_licenses` that applies to targets with
`policy_name` in their policies list.
The returned policy asserts that all licenses contained in the target
(including through explicit & implicit dependencies) are in the whitelist
defined by `allowed_licenses`.
See example in tests/errors.
]
def function[policy_func, parameter[build_context, target]]:
constant[whitelist_{policy_name}_licenses policy function.
Return error message (string) if policy for `target` is violated,
otherwise return `None`.
]
if compare[name[policy_name] in name[target].props.policies] begin[:]
variable[licenses] assign[=] call[name[set], parameter[name[target].props.license]]
for taget[name[dep]] in starred[call[name[build_context].generate_all_deps, parameter[name[target]]]] begin[:]
call[name[licenses].update, parameter[name[dep].props.license]]
call[name[licenses].difference_update, parameter[name[allowed_licenses]]]
if name[licenses] begin[:]
return[call[constant[Invalid licenses for {} policy: {}].format, parameter[name[policy_name], call[constant[, ].join, parameter[call[name[sorted], parameter[name[licenses]]]]]]]]
return[constant[None]]
name[policy_func].__name__ assign[=] call[constant[whitelist_{}_licenses].format, parameter[name[policy_name]]]
return[name[policy_func]] | keyword[def] identifier[whitelist_licenses_policy] ( identifier[policy_name] : identifier[str] , identifier[allowed_licenses] : identifier[set] ):
literal[string]
keyword[def] identifier[policy_func] ( identifier[build_context] , identifier[target] ):
literal[string]
keyword[if] identifier[policy_name] keyword[in] identifier[target] . identifier[props] . identifier[policies] :
identifier[licenses] = identifier[set] ( identifier[target] . identifier[props] . identifier[license] )
keyword[for] identifier[dep] keyword[in] identifier[build_context] . identifier[generate_all_deps] ( identifier[target] ):
identifier[licenses] . identifier[update] ( identifier[dep] . identifier[props] . identifier[license] )
identifier[licenses] . identifier[difference_update] ( identifier[allowed_licenses] )
keyword[if] identifier[licenses] :
keyword[return] literal[string] . identifier[format] (
identifier[policy_name] , literal[string] . identifier[join] ( identifier[sorted] ( identifier[licenses] )))
keyword[return] keyword[None]
identifier[policy_func] . identifier[__name__] = literal[string] . identifier[format] ( identifier[policy_name] )
keyword[return] identifier[policy_func] | def whitelist_licenses_policy(policy_name: str, allowed_licenses: set):
"""A policy factory for making license-based whitelist policies.
To apply in project, include the function returned from this factory
in the ilst returned by the `get_policies` function implemented in the
project `YSettings` file.
The factory returns a policy function named
`whitelist_{policy_name}_licenses` that applies to targets with
`policy_name` in their policies list.
The returned policy asserts that all licenses contained in the target
(including through explicit & implicit dependencies) are in the whitelist
defined by `allowed_licenses`.
See example in tests/errors.
"""
def policy_func(build_context, target):
"""whitelist_{policy_name}_licenses policy function.
Return error message (string) if policy for `target` is violated,
otherwise return `None`.
"""
if policy_name in target.props.policies:
licenses = set(target.props.license)
for dep in build_context.generate_all_deps(target):
licenses.update(dep.props.license) # depends on [control=['for'], data=['dep']]
licenses.difference_update(allowed_licenses)
if licenses:
return 'Invalid licenses for {} policy: {}'.format(policy_name, ', '.join(sorted(licenses))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['policy_name']]
return None
policy_func.__name__ = 'whitelist_{}_licenses'.format(policy_name)
return policy_func |
def parse_isodate(datestr):
"""Parse a string that loosely fits ISO 8601 formatted date-time string
"""
m = isodate_rx.search(datestr)
assert m, 'unrecognized date format: ' + datestr
year, month, day = m.group('year', 'month', 'day')
hour, minute, second, fraction = m.group('hour', 'minute', 'second', 'fraction')
tz, tzhh, tzmm = m.group('tz', 'tzhh', 'tzmm')
dt = datetime.datetime(int(year), int(month), int(day), int(hour))
if fraction is None:
fraction = 0
else:
fraction = float('0.' + fraction)
if minute is None:
dt = dt.replace(minute=int(60 * fraction))
else:
dt = dt.replace(minute=int(minute))
if second is None:
dt = dt.replace(second=int(60 * fraction))
else:
dt = dt.replace(second=int(second), microsecond=int(1000000 * fraction))
if tz is not None:
if tz[0] == 'Z':
offset = 0
else:
offset = datetime.timedelta(minutes=int(tzmm or 0), hours=int(tzhh))
if tz[0] == '-':
offset = -offset
dt = dt.replace(tzinfo=UTCOffset(offset))
return dt | def function[parse_isodate, parameter[datestr]]:
constant[Parse a string that loosely fits ISO 8601 formatted date-time string
]
variable[m] assign[=] call[name[isodate_rx].search, parameter[name[datestr]]]
assert[name[m]]
<ast.Tuple object at 0x7da1b28873d0> assign[=] call[name[m].group, parameter[constant[year], constant[month], constant[day]]]
<ast.Tuple object at 0x7da1b2886770> assign[=] call[name[m].group, parameter[constant[hour], constant[minute], constant[second], constant[fraction]]]
<ast.Tuple object at 0x7da1b28853f0> assign[=] call[name[m].group, parameter[constant[tz], constant[tzhh], constant[tzmm]]]
variable[dt] assign[=] call[name[datetime].datetime, parameter[call[name[int], parameter[name[year]]], call[name[int], parameter[name[month]]], call[name[int], parameter[name[day]]], call[name[int], parameter[name[hour]]]]]
if compare[name[fraction] is constant[None]] begin[:]
variable[fraction] assign[=] constant[0]
if compare[name[minute] is constant[None]] begin[:]
variable[dt] assign[=] call[name[dt].replace, parameter[]]
if compare[name[tz] is_not constant[None]] begin[:]
if compare[call[name[tz]][constant[0]] equal[==] constant[Z]] begin[:]
variable[offset] assign[=] constant[0]
variable[dt] assign[=] call[name[dt].replace, parameter[]]
return[name[dt]] | keyword[def] identifier[parse_isodate] ( identifier[datestr] ):
literal[string]
identifier[m] = identifier[isodate_rx] . identifier[search] ( identifier[datestr] )
keyword[assert] identifier[m] , literal[string] + identifier[datestr]
identifier[year] , identifier[month] , identifier[day] = identifier[m] . identifier[group] ( literal[string] , literal[string] , literal[string] )
identifier[hour] , identifier[minute] , identifier[second] , identifier[fraction] = identifier[m] . identifier[group] ( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[tz] , identifier[tzhh] , identifier[tzmm] = identifier[m] . identifier[group] ( literal[string] , literal[string] , literal[string] )
identifier[dt] = identifier[datetime] . identifier[datetime] ( identifier[int] ( identifier[year] ), identifier[int] ( identifier[month] ), identifier[int] ( identifier[day] ), identifier[int] ( identifier[hour] ))
keyword[if] identifier[fraction] keyword[is] keyword[None] :
identifier[fraction] = literal[int]
keyword[else] :
identifier[fraction] = identifier[float] ( literal[string] + identifier[fraction] )
keyword[if] identifier[minute] keyword[is] keyword[None] :
identifier[dt] = identifier[dt] . identifier[replace] ( identifier[minute] = identifier[int] ( literal[int] * identifier[fraction] ))
keyword[else] :
identifier[dt] = identifier[dt] . identifier[replace] ( identifier[minute] = identifier[int] ( identifier[minute] ))
keyword[if] identifier[second] keyword[is] keyword[None] :
identifier[dt] = identifier[dt] . identifier[replace] ( identifier[second] = identifier[int] ( literal[int] * identifier[fraction] ))
keyword[else] :
identifier[dt] = identifier[dt] . identifier[replace] ( identifier[second] = identifier[int] ( identifier[second] ), identifier[microsecond] = identifier[int] ( literal[int] * identifier[fraction] ))
keyword[if] identifier[tz] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[tz] [ literal[int] ]== literal[string] :
identifier[offset] = literal[int]
keyword[else] :
identifier[offset] = identifier[datetime] . identifier[timedelta] ( identifier[minutes] = identifier[int] ( identifier[tzmm] keyword[or] literal[int] ), identifier[hours] = identifier[int] ( identifier[tzhh] ))
keyword[if] identifier[tz] [ literal[int] ]== literal[string] :
identifier[offset] =- identifier[offset]
identifier[dt] = identifier[dt] . identifier[replace] ( identifier[tzinfo] = identifier[UTCOffset] ( identifier[offset] ))
keyword[return] identifier[dt] | def parse_isodate(datestr):
"""Parse a string that loosely fits ISO 8601 formatted date-time string
"""
m = isodate_rx.search(datestr)
assert m, 'unrecognized date format: ' + datestr
(year, month, day) = m.group('year', 'month', 'day')
(hour, minute, second, fraction) = m.group('hour', 'minute', 'second', 'fraction')
(tz, tzhh, tzmm) = m.group('tz', 'tzhh', 'tzmm')
dt = datetime.datetime(int(year), int(month), int(day), int(hour))
if fraction is None:
fraction = 0 # depends on [control=['if'], data=['fraction']]
else:
fraction = float('0.' + fraction)
if minute is None:
dt = dt.replace(minute=int(60 * fraction)) # depends on [control=['if'], data=[]]
else:
dt = dt.replace(minute=int(minute))
if second is None:
dt = dt.replace(second=int(60 * fraction)) # depends on [control=['if'], data=[]]
else:
dt = dt.replace(second=int(second), microsecond=int(1000000 * fraction))
if tz is not None:
if tz[0] == 'Z':
offset = 0 # depends on [control=['if'], data=[]]
else:
offset = datetime.timedelta(minutes=int(tzmm or 0), hours=int(tzhh))
if tz[0] == '-':
offset = -offset # depends on [control=['if'], data=[]]
dt = dt.replace(tzinfo=UTCOffset(offset)) # depends on [control=['if'], data=['tz']]
return dt |
def draw(self):
"""Do not call directly."""
if self.hidden:
return False
if self.background_color is not None:
render.fillrect(self.surface, self.background_color,
rect=pygame.Rect((0, 0), self.frame.size))
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2,
topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft)
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if (type(child.border_widths) is int and
child.border_widths > 0):
pygame.draw.rect(self.surface, child.border_color,
child.frame, child.border_widths)
else:
tw, lw, bw, rw = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, tr, tw)
if lw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, bl, lw)
if bw > 0:
pygame.draw.line(self.surface, child.border_color,
bl, br, bw)
if rw > 0:
pygame.draw.line(self.surface, child.border_color,
tr, br, rw)
return True | def function[draw, parameter[self]]:
constant[Do not call directly.]
if name[self].hidden begin[:]
return[constant[False]]
if compare[name[self].background_color is_not constant[None]] begin[:]
call[name[render].fillrect, parameter[name[self].surface, name[self].background_color]]
for taget[name[child]] in starred[name[self].children] begin[:]
if <ast.UnaryOp object at 0x7da1b11a15a0> begin[:]
call[name[child].draw, parameter[]]
variable[topleft] assign[=] name[child].frame.topleft
if name[child].shadowed begin[:]
variable[shadow_size] assign[=] name[theme].current.shadow_size
variable[shadow_topleft] assign[=] tuple[[<ast.BinOp object at 0x7da1b11a0be0>, <ast.BinOp object at 0x7da1b11a0040>]]
call[name[self].surface.blit, parameter[name[child].shadow_image, name[shadow_topleft]]]
call[name[self].surface.blit, parameter[name[child].surface, name[topleft]]]
if <ast.BoolOp object at 0x7da1b11a3f40> begin[:]
if <ast.BoolOp object at 0x7da1b11a1b70> begin[:]
call[name[pygame].draw.rect, parameter[name[self].surface, name[child].border_color, name[child].frame, name[child].border_widths]]
return[constant[True]] | keyword[def] identifier[draw] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[hidden] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[background_color] keyword[is] keyword[not] keyword[None] :
identifier[render] . identifier[fillrect] ( identifier[self] . identifier[surface] , identifier[self] . identifier[background_color] ,
identifier[rect] = identifier[pygame] . identifier[Rect] (( literal[int] , literal[int] ), identifier[self] . identifier[frame] . identifier[size] ))
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[children] :
keyword[if] keyword[not] identifier[child] . identifier[hidden] :
identifier[child] . identifier[draw] ()
identifier[topleft] = identifier[child] . identifier[frame] . identifier[topleft]
keyword[if] identifier[child] . identifier[shadowed] :
identifier[shadow_size] = identifier[theme] . identifier[current] . identifier[shadow_size]
identifier[shadow_topleft] =( identifier[topleft] [ literal[int] ]- identifier[shadow_size] // literal[int] ,
identifier[topleft] [ literal[int] ]- identifier[shadow_size] // literal[int] )
identifier[self] . identifier[surface] . identifier[blit] ( identifier[child] . identifier[shadow_image] , identifier[shadow_topleft] )
identifier[self] . identifier[surface] . identifier[blit] ( identifier[child] . identifier[surface] , identifier[topleft] )
keyword[if] identifier[child] . identifier[border_color] keyword[and] identifier[child] . identifier[border_widths] keyword[is] keyword[not] keyword[None] :
keyword[if] ( identifier[type] ( identifier[child] . identifier[border_widths] ) keyword[is] identifier[int] keyword[and]
identifier[child] . identifier[border_widths] > literal[int] ):
identifier[pygame] . identifier[draw] . identifier[rect] ( identifier[self] . identifier[surface] , identifier[child] . identifier[border_color] ,
identifier[child] . identifier[frame] , identifier[child] . identifier[border_widths] )
keyword[else] :
identifier[tw] , identifier[lw] , identifier[bw] , identifier[rw] = identifier[child] . identifier[get_border_widths] ()
identifier[tl] =( identifier[child] . identifier[frame] . identifier[left] , identifier[child] . identifier[frame] . identifier[top] )
identifier[tr] =( identifier[child] . identifier[frame] . identifier[right] - literal[int] , identifier[child] . identifier[frame] . identifier[top] )
identifier[bl] =( identifier[child] . identifier[frame] . identifier[left] , identifier[child] . identifier[frame] . identifier[bottom] - literal[int] )
identifier[br] =( identifier[child] . identifier[frame] . identifier[right] - literal[int] , identifier[child] . identifier[frame] . identifier[bottom] - literal[int] )
keyword[if] identifier[tw] > literal[int] :
identifier[pygame] . identifier[draw] . identifier[line] ( identifier[self] . identifier[surface] , identifier[child] . identifier[border_color] ,
identifier[tl] , identifier[tr] , identifier[tw] )
keyword[if] identifier[lw] > literal[int] :
identifier[pygame] . identifier[draw] . identifier[line] ( identifier[self] . identifier[surface] , identifier[child] . identifier[border_color] ,
identifier[tl] , identifier[bl] , identifier[lw] )
keyword[if] identifier[bw] > literal[int] :
identifier[pygame] . identifier[draw] . identifier[line] ( identifier[self] . identifier[surface] , identifier[child] . identifier[border_color] ,
identifier[bl] , identifier[br] , identifier[bw] )
keyword[if] identifier[rw] > literal[int] :
identifier[pygame] . identifier[draw] . identifier[line] ( identifier[self] . identifier[surface] , identifier[child] . identifier[border_color] ,
identifier[tr] , identifier[br] , identifier[rw] )
keyword[return] keyword[True] | def draw(self):
"""Do not call directly."""
if self.hidden:
return False # depends on [control=['if'], data=[]]
if self.background_color is not None:
render.fillrect(self.surface, self.background_color, rect=pygame.Rect((0, 0), self.frame.size)) # depends on [control=['if'], data=[]]
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2, topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft) # depends on [control=['if'], data=[]]
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if type(child.border_widths) is int and child.border_widths > 0:
pygame.draw.rect(self.surface, child.border_color, child.frame, child.border_widths) # depends on [control=['if'], data=[]]
else:
(tw, lw, bw, rw) = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color, tl, tr, tw) # depends on [control=['if'], data=['tw']]
if lw > 0:
pygame.draw.line(self.surface, child.border_color, tl, bl, lw) # depends on [control=['if'], data=['lw']]
if bw > 0:
pygame.draw.line(self.surface, child.border_color, bl, br, bw) # depends on [control=['if'], data=['bw']]
if rw > 0:
pygame.draw.line(self.surface, child.border_color, tr, br, rw) # depends on [control=['if'], data=['rw']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
return True |
def _SetExtractionParsersAndPlugins(self, configuration, session):
"""Sets the parsers and plugins before extraction.
Args:
configuration (ProcessingConfiguration): processing configuration.
session (Session): session.
"""
names_generator = parsers_manager.ParsersManager.GetParserAndPluginNames(
parser_filter_expression=configuration.parser_filter_expression)
session.enabled_parser_names = list(names_generator)
session.parser_filter_expression = configuration.parser_filter_expression | def function[_SetExtractionParsersAndPlugins, parameter[self, configuration, session]]:
constant[Sets the parsers and plugins before extraction.
Args:
configuration (ProcessingConfiguration): processing configuration.
session (Session): session.
]
variable[names_generator] assign[=] call[name[parsers_manager].ParsersManager.GetParserAndPluginNames, parameter[]]
name[session].enabled_parser_names assign[=] call[name[list], parameter[name[names_generator]]]
name[session].parser_filter_expression assign[=] name[configuration].parser_filter_expression | keyword[def] identifier[_SetExtractionParsersAndPlugins] ( identifier[self] , identifier[configuration] , identifier[session] ):
literal[string]
identifier[names_generator] = identifier[parsers_manager] . identifier[ParsersManager] . identifier[GetParserAndPluginNames] (
identifier[parser_filter_expression] = identifier[configuration] . identifier[parser_filter_expression] )
identifier[session] . identifier[enabled_parser_names] = identifier[list] ( identifier[names_generator] )
identifier[session] . identifier[parser_filter_expression] = identifier[configuration] . identifier[parser_filter_expression] | def _SetExtractionParsersAndPlugins(self, configuration, session):
"""Sets the parsers and plugins before extraction.
Args:
configuration (ProcessingConfiguration): processing configuration.
session (Session): session.
"""
names_generator = parsers_manager.ParsersManager.GetParserAndPluginNames(parser_filter_expression=configuration.parser_filter_expression)
session.enabled_parser_names = list(names_generator)
session.parser_filter_expression = configuration.parser_filter_expression |
def extract(self, item, article_candidate_list):
    """Compares the extracted texts and returns the most likely one.

    Candidates without a text, or with a text shorter than 15 words, are
    discarded.  The surviving texts are compared pairwise on their word
    sets; the most similar pair determines the winner: a text extracted by
    "newspaper" is preferred, otherwise the longer of the two is returned.

    :param item: The corresponding NewscrawlerItem (unused by the scoring).
    :param article_candidate_list: A list of ArticleCandidate objects which
        have been extracted.
    :return: A string, the most likely text, or None if no candidate
        qualifies.
    """
    # The minimal number of words a text needs to have.
    min_number_words = 15

    # Collect (text, extractor) pairs for every candidate that produced a
    # sufficiently long text.  Filtering into a fresh list fixes a bug in
    # the previous implementation, which removed short texts from the list
    # *while iterating it* and therefore silently skipped elements.
    list_text = [(candidate.text, candidate.extractor)
                 for candidate in article_candidate_list
                 if candidate.text is not None
                 and len(candidate.text.split()) >= min_number_words]

    # No qualifying candidate: nothing to return.
    if not list_text:
        return None
    # Exactly one qualifying candidate: it wins by default.
    if len(list_text) < 2:
        return list_text[0][0]

    # Score every pair of texts.  The score divides the number of words
    # appearing in only one text by twice the number of shared words and
    # subtracts from 1, so identical word sets score 1.0.
    list_score = []
    for a, b in itertools.combinations(list_text, 2):
        set_a = set(a[0].split())
        set_b = set(b[0].split())
        symmetric_difference = set_a ^ set_b
        intersection = set_a & set_b
        if not intersection:
            # No shared words at all: treat the pair as maximally
            # dissimilar.  (The previous code compared the intersection
            # *set* against 0, so its divide-by-zero guard never fired.)
            score = float("-inf")
        else:
            score = 1 - (len(symmetric_difference) / (2 * len(intersection)))
        list_score.append((score, a[1], b[1]))

    # The highest-scoring triple names the two most similar extractors.
    best_score = max(list_score, key=lambda triple: triple[0])

    # Prefer the newspaper extraction when it is one of the two winners.
    if "newspaper" in best_score:
        return next(text for text, extractor in list_text
                    if extractor == "newspaper")

    # Otherwise return the longer of the two most similar texts.
    top_candidates = [pair for pair in list_text
                      if pair[1] == best_score[1] or pair[1] == best_score[2]]
    if len(top_candidates[0][0]) > len(top_candidates[1][0]):
        return top_candidates[0][0]
    return top_candidates[1][0]
constant[Compares the extracted texts.
:param item: The corresponding NewscrawlerItem
:param article_candidate_list: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely text
]
variable[list_text] assign[=] list[[]]
variable[min_number_words] assign[=] constant[15]
for taget[name[article_candidate]] in starred[name[article_candidate_list]] begin[:]
if compare[name[article_candidate].text not_equal[!=] constant[None]] begin[:]
call[name[list_text].append, parameter[tuple[[<ast.Attribute object at 0x7da20e9546a0>, <ast.Attribute object at 0x7da20e955db0>]]]]
for taget[name[text_tuple]] in starred[name[list_text]] begin[:]
if compare[call[name[len], parameter[call[call[name[text_tuple]][constant[0]].split, parameter[]]]] less[<] name[min_number_words]] begin[:]
call[name[list_text].remove, parameter[name[text_tuple]]]
if compare[call[name[len], parameter[name[list_text]]] equal[==] constant[0]] begin[:]
return[constant[None]]
if compare[call[name[len], parameter[name[list_text]]] less[<] constant[2]] begin[:]
return[call[call[name[list_text]][constant[0]]][constant[0]]] | keyword[def] identifier[extract] ( identifier[self] , identifier[item] , identifier[article_candidate_list] ):
literal[string]
identifier[list_text] =[]
identifier[min_number_words] = literal[int]
keyword[for] identifier[article_candidate] keyword[in] identifier[article_candidate_list] :
keyword[if] identifier[article_candidate] . identifier[text] != keyword[None] :
identifier[list_text] . identifier[append] (( identifier[article_candidate] . identifier[text] , identifier[article_candidate] . identifier[extractor] ))
keyword[for] identifier[text_tuple] keyword[in] identifier[list_text] :
keyword[if] identifier[len] ( identifier[text_tuple] [ literal[int] ]. identifier[split] ())< identifier[min_number_words] :
identifier[list_text] . identifier[remove] ( identifier[text_tuple] )
keyword[if] identifier[len] ( identifier[list_text] )== literal[int] :
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[list_text] )< literal[int] :
keyword[return] identifier[list_text] [ literal[int] ][ literal[int] ]
keyword[else] :
identifier[list_score] =[]
keyword[for] identifier[a] , identifier[b] , keyword[in] identifier[itertools] . identifier[combinations] ( identifier[list_text] , literal[int] ):
identifier[set_a] = identifier[set] ( identifier[a] [ literal[int] ]. identifier[split] ())
identifier[set_b] = identifier[set] ( identifier[b] [ literal[int] ]. identifier[split] ())
identifier[symmetric_difference_a_b] = identifier[set_a] ^ identifier[set_b]
identifier[intersection_a_b] = identifier[set_a] & identifier[set_b]
keyword[if] identifier[intersection_a_b] == literal[int] :
identifier[intersection_a_b] =- literal[int]
identifier[score] = literal[int] -(( identifier[len] ( identifier[symmetric_difference_a_b] ))/( literal[int] * identifier[len] ( identifier[intersection_a_b] )))
identifier[list_score] . identifier[append] (( identifier[score] , identifier[a] [ literal[int] ], identifier[b] [ literal[int] ]))
identifier[best_score] = identifier[max] ( identifier[list_score] , identifier[key] = keyword[lambda] identifier[item] : identifier[item] [ literal[int] ])
keyword[if] literal[string] keyword[in] identifier[best_score] :
keyword[return] ( identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]== literal[string] , identifier[list_text] ))[ literal[int] ][ literal[int] ])
keyword[else] :
identifier[top_candidates] =[]
keyword[for] identifier[tuple] keyword[in] identifier[list_text] :
keyword[if] identifier[tuple] [ literal[int] ]== identifier[best_score] [ literal[int] ] keyword[or] identifier[tuple] [ literal[int] ]== identifier[best_score] [ literal[int] ]:
identifier[top_candidates] . identifier[append] ( identifier[tuple] )
keyword[if] identifier[len] ( identifier[top_candidates] [ literal[int] ][ literal[int] ])> identifier[len] ( identifier[top_candidates] [ literal[int] ][ literal[int] ]):
keyword[return] ( identifier[top_candidates] [ literal[int] ][ literal[int] ])
keyword[else] :
keyword[return] ( identifier[top_candidates] [ literal[int] ][ literal[int] ]) | def extract(self, item, article_candidate_list):
"""Compares the extracted texts.
:param item: The corresponding NewscrawlerItem
:param article_candidate_list: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely text
"""
list_text = []
# The minimal number of words a text needs to have
min_number_words = 15
# The texts of the article candidates and the respective extractors are saved in a tuple in list_text.
for article_candidate in article_candidate_list:
if article_candidate.text != None:
list_text.append((article_candidate.text, article_candidate.extractor)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['article_candidate']]
# Remove texts that are shorter than min_number_words.
for text_tuple in list_text:
if len(text_tuple[0].split()) < min_number_words:
list_text.remove(text_tuple) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['text_tuple']]
# If there is no value in the list, return None.
if len(list_text) == 0:
return None # depends on [control=['if'], data=[]]
# If there is only one solution, return it.
if len(list_text) < 2:
return list_text[0][0] # depends on [control=['if'], data=[]]
else:
# If there is more than one solution, do the following:
# Create a list which holds triple of the score and the two extractors
list_score = []
# Compare every text with all other texts at least once
for (a, b) in itertools.combinations(list_text, 2):
# Create sets from the texts
set_a = set(a[0].split())
set_b = set(b[0].split())
symmetric_difference_a_b = set_a ^ set_b
intersection_a_b = set_a & set_b
# Replace 0 with -1 in order to elude division by zero
if intersection_a_b == 0:
intersection_a_b = -1 # depends on [control=['if'], data=['intersection_a_b']]
# Create the score. It divides the number of words which are not in both texts by the number of words which
# are in both texts and subtracts the result from 1. The closer to 1 the more similiar they are.
score = 1 - len(symmetric_difference_a_b) / (2 * len(intersection_a_b))
list_score.append((score, a[1], b[1])) # depends on [control=['for'], data=[]]
# Find out which is the highest score
best_score = max(list_score, key=lambda item: item[0])
# If one of the solutions is newspaper return it
if 'newspaper' in best_score:
return list(filter(lambda x: x[1] == 'newspaper', list_text))[0][0] # depends on [control=['if'], data=[]]
else:
# If not, return the text that is longer
# A list that holds the extracted texts and their extractors which were most similar
top_candidates = []
for tuple in list_text:
if tuple[1] == best_score[1] or tuple[1] == best_score[2]:
top_candidates.append(tuple) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tuple']]
if len(top_candidates[0][0]) > len(top_candidates[1][0]):
return top_candidates[0][0] # depends on [control=['if'], data=[]]
else:
return top_candidates[1][0] |
def sequencebank(self):
    """List of namedtuples representing biological entities defined or
    mentioned in the text, in the form (name, sequence_number, type).
    """
    Sequencebank = namedtuple('Sequencebank', 'name sequence_number type')
    # Walk down to the sequencebank entries; missing keys yield [].
    entries = listify(chained_get(
        self._head, ['enhancement', 'sequencebanks', 'sequencebank'], []))
    # Each entry may carry one or several sequence numbers.
    result = [
        Sequencebank(name=entry['@name'],
                     sequence_number=number['$'],
                     type=number['@type'])
        for entry in entries
        for number in listify(entry['sequence-number'])
    ]
    return result or None
constant[List of namedtuples representing biological entities defined or
mentioned in the text, in the form (name, sequence_number, type).
]
variable[path] assign[=] list[[<ast.Constant object at 0x7da20c795540>, <ast.Constant object at 0x7da20c794b80>, <ast.Constant object at 0x7da20c7963e0>]]
variable[items] assign[=] call[name[listify], parameter[call[name[chained_get], parameter[name[self]._head, name[path], list[[]]]]]]
variable[bank] assign[=] call[name[namedtuple], parameter[constant[Sequencebank], constant[name sequence_number type]]]
variable[out] assign[=] list[[]]
for taget[name[item]] in starred[name[items]] begin[:]
variable[numbers] assign[=] call[name[listify], parameter[call[name[item]][constant[sequence-number]]]]
for taget[name[number]] in starred[name[numbers]] begin[:]
variable[new] assign[=] call[name[bank], parameter[]]
call[name[out].append, parameter[name[new]]]
return[<ast.BoolOp object at 0x7da20c6c7310>] | keyword[def] identifier[sequencebank] ( identifier[self] ):
literal[string]
identifier[path] =[ literal[string] , literal[string] , literal[string] ]
identifier[items] = identifier[listify] ( identifier[chained_get] ( identifier[self] . identifier[_head] , identifier[path] ,[]))
identifier[bank] = identifier[namedtuple] ( literal[string] , literal[string] )
identifier[out] =[]
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[numbers] = identifier[listify] ( identifier[item] [ literal[string] ])
keyword[for] identifier[number] keyword[in] identifier[numbers] :
identifier[new] = identifier[bank] ( identifier[name] = identifier[item] [ literal[string] ], identifier[sequence_number] = identifier[number] [ literal[string] ],
identifier[type] = identifier[number] [ literal[string] ])
identifier[out] . identifier[append] ( identifier[new] )
keyword[return] identifier[out] keyword[or] keyword[None] | def sequencebank(self):
"""List of namedtuples representing biological entities defined or
mentioned in the text, in the form (name, sequence_number, type).
"""
path = ['enhancement', 'sequencebanks', 'sequencebank']
items = listify(chained_get(self._head, path, []))
bank = namedtuple('Sequencebank', 'name sequence_number type')
out = []
for item in items:
numbers = listify(item['sequence-number'])
for number in numbers:
new = bank(name=item['@name'], sequence_number=number['$'], type=number['@type'])
out.append(new) # depends on [control=['for'], data=['number']] # depends on [control=['for'], data=['item']]
return out or None |
def count(args):
    """
    %prog count fastafile jf.db
    Run dump - jellyfish - bin - bincount in serial.
    """
    from bitarray import bitarray
    p = OptionParser(count.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    fastafile, jfdb = args
    # K-mer size is recovered from the jellyfish database name/metadata.
    K = get_K(jfdb)
    # NOTE(review): cmd contains a shell pipe; Popen here is presumably a
    # project wrapper that runs the command through a shell -- confirm,
    # since plain subprocess.Popen would not accept this string.
    cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
    # NOTE(review): must_open("tmp", "w") presumably returns a read-write
    # temporary file, since t is seek(0)'d and read back below -- confirm.
    t = must_open("tmp", "w")
    proc = Popen(cmd, stdin=PIPE, stdout=t)
    t.flush()
    f = Fasta(fastafile, lazy=True)
    # Stream every k-mer of every record into jellyfish's stdin.
    for name, rec in f.iteritems_ordered():
        kmers = list(make_kmers(rec.seq, K))
        print("\n".join(kmers), file=proc.stdin)
    # Closing stdin signals end-of-input so the pipeline can finish.
    proc.stdin.close()
    logging.debug(cmd)
    proc.wait()
    a = bitarray()
    binfile = ".".join((fastafile, jfdb, "bin"))
    # NOTE(review): binfile is opened in text mode but bitarray.tofile
    # writes bytes -- on Python 3 this likely needs "wb"; confirm.
    fw = open(binfile, "w")
    t.seek(0)
    # Each output line is a count; record presence (0/1) as one bit.
    for row in t:
        c = row.strip()
        a.append(int(c))
    a.tofile(fw)
    logging.debug("Serialize {0} bits to `{1}`.".format(len(a), binfile))
    fw.close()
    sh("rm {0}".format(t.name))
    logging.debug("Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.".\
            format(K, fastafile, jfdb, binfile))
    cntfile = ".".join((fastafile, jfdb, "cnt"))
    # Summarize the per-k-mer bit vector into per-record shared counts.
    bincount([fastafile, binfile, "-o", cntfile, "-K {0}".format(K)])
    logging.debug("Shared K-mer counts written to `{0}`.".format(cntfile))
constant[
%prog count fastafile jf.db
Run dump - jellyfish - bin - bincount in serial.
]
from relative_module[bitarray] import module[bitarray]
variable[p] assign[=] call[name[OptionParser], parameter[name[count].__doc__]]
<ast.Tuple object at 0x7da1b08c9540> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b08c85e0>]]
<ast.Tuple object at 0x7da1b08cb640> assign[=] name[args]
variable[K] assign[=] call[name[get_K], parameter[name[jfdb]]]
variable[cmd] assign[=] call[constant[jellyfish query {0} -C | cut -d' ' -f 2].format, parameter[name[jfdb]]]
variable[t] assign[=] call[name[must_open], parameter[constant[tmp], constant[w]]]
variable[proc] assign[=] call[name[Popen], parameter[name[cmd]]]
call[name[t].flush, parameter[]]
variable[f] assign[=] call[name[Fasta], parameter[name[fastafile]]]
for taget[tuple[[<ast.Name object at 0x7da1b08c9690>, <ast.Name object at 0x7da1b08c9f60>]]] in starred[call[name[f].iteritems_ordered, parameter[]]] begin[:]
variable[kmers] assign[=] call[name[list], parameter[call[name[make_kmers], parameter[name[rec].seq, name[K]]]]]
call[name[print], parameter[call[constant[
].join, parameter[name[kmers]]]]]
call[name[proc].stdin.close, parameter[]]
call[name[logging].debug, parameter[name[cmd]]]
call[name[proc].wait, parameter[]]
variable[a] assign[=] call[name[bitarray], parameter[]]
variable[binfile] assign[=] call[constant[.].join, parameter[tuple[[<ast.Name object at 0x7da1b08c9000>, <ast.Name object at 0x7da1b08c93c0>, <ast.Constant object at 0x7da1b08c9270>]]]]
variable[fw] assign[=] call[name[open], parameter[name[binfile], constant[w]]]
call[name[t].seek, parameter[constant[0]]]
for taget[name[row]] in starred[name[t]] begin[:]
variable[c] assign[=] call[name[row].strip, parameter[]]
call[name[a].append, parameter[call[name[int], parameter[name[c]]]]]
call[name[a].tofile, parameter[name[fw]]]
call[name[logging].debug, parameter[call[constant[Serialize {0} bits to `{1}`.].format, parameter[call[name[len], parameter[name[a]]], name[binfile]]]]]
call[name[fw].close, parameter[]]
call[name[sh], parameter[call[constant[rm {0}].format, parameter[name[t].name]]]]
call[name[logging].debug, parameter[call[constant[Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.].format, parameter[name[K], name[fastafile], name[jfdb], name[binfile]]]]]
variable[cntfile] assign[=] call[constant[.].join, parameter[tuple[[<ast.Name object at 0x7da1b0900160>, <ast.Name object at 0x7da1b09030d0>, <ast.Constant object at 0x7da1b0903730>]]]]
call[name[bincount], parameter[list[[<ast.Name object at 0x7da1b0901fc0>, <ast.Name object at 0x7da1b0901780>, <ast.Constant object at 0x7da1b09020e0>, <ast.Name object at 0x7da1b0901960>, <ast.Call object at 0x7da1b0901ed0>]]]]
call[name[logging].debug, parameter[call[constant[Shared K-mer counts written to `{0}`.].format, parameter[name[cntfile]]]]] | keyword[def] identifier[count] ( identifier[args] ):
literal[string]
keyword[from] identifier[bitarray] keyword[import] identifier[bitarray]
identifier[p] = identifier[OptionParser] ( identifier[count] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[fastafile] , identifier[jfdb] = identifier[args]
identifier[K] = identifier[get_K] ( identifier[jfdb] )
identifier[cmd] = literal[string] . identifier[format] ( identifier[jfdb] )
identifier[t] = identifier[must_open] ( literal[string] , literal[string] )
identifier[proc] = identifier[Popen] ( identifier[cmd] , identifier[stdin] = identifier[PIPE] , identifier[stdout] = identifier[t] )
identifier[t] . identifier[flush] ()
identifier[f] = identifier[Fasta] ( identifier[fastafile] , identifier[lazy] = keyword[True] )
keyword[for] identifier[name] , identifier[rec] keyword[in] identifier[f] . identifier[iteritems_ordered] ():
identifier[kmers] = identifier[list] ( identifier[make_kmers] ( identifier[rec] . identifier[seq] , identifier[K] ))
identifier[print] ( literal[string] . identifier[join] ( identifier[kmers] ), identifier[file] = identifier[proc] . identifier[stdin] )
identifier[proc] . identifier[stdin] . identifier[close] ()
identifier[logging] . identifier[debug] ( identifier[cmd] )
identifier[proc] . identifier[wait] ()
identifier[a] = identifier[bitarray] ()
identifier[binfile] = literal[string] . identifier[join] (( identifier[fastafile] , identifier[jfdb] , literal[string] ))
identifier[fw] = identifier[open] ( identifier[binfile] , literal[string] )
identifier[t] . identifier[seek] ( literal[int] )
keyword[for] identifier[row] keyword[in] identifier[t] :
identifier[c] = identifier[row] . identifier[strip] ()
identifier[a] . identifier[append] ( identifier[int] ( identifier[c] ))
identifier[a] . identifier[tofile] ( identifier[fw] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[a] ), identifier[binfile] ))
identifier[fw] . identifier[close] ()
identifier[sh] ( literal[string] . identifier[format] ( identifier[t] . identifier[name] ))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[K] , identifier[fastafile] , identifier[jfdb] , identifier[binfile] ))
identifier[cntfile] = literal[string] . identifier[join] (( identifier[fastafile] , identifier[jfdb] , literal[string] ))
identifier[bincount] ([ identifier[fastafile] , identifier[binfile] , literal[string] , identifier[cntfile] , literal[string] . identifier[format] ( identifier[K] )])
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[cntfile] )) | def count(args):
"""
%prog count fastafile jf.db
Run dump - jellyfish - bin - bincount in serial.
"""
from bitarray import bitarray
p = OptionParser(count.__doc__)
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(fastafile, jfdb) = args
K = get_K(jfdb)
cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
t = must_open('tmp', 'w')
proc = Popen(cmd, stdin=PIPE, stdout=t)
t.flush()
f = Fasta(fastafile, lazy=True)
for (name, rec) in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print('\n'.join(kmers), file=proc.stdin) # depends on [control=['for'], data=[]]
proc.stdin.close()
logging.debug(cmd)
proc.wait()
a = bitarray()
binfile = '.'.join((fastafile, jfdb, 'bin'))
fw = open(binfile, 'w')
t.seek(0)
for row in t:
c = row.strip()
a.append(int(c)) # depends on [control=['for'], data=['row']]
a.tofile(fw)
logging.debug('Serialize {0} bits to `{1}`.'.format(len(a), binfile))
fw.close()
sh('rm {0}'.format(t.name))
logging.debug('Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.'.format(K, fastafile, jfdb, binfile))
cntfile = '.'.join((fastafile, jfdb, 'cnt'))
bincount([fastafile, binfile, '-o', cntfile, '-K {0}'.format(K)])
logging.debug('Shared K-mer counts written to `{0}`.'.format(cntfile)) |
def _get_initial_states(self,
                        batch_size: int,
                        num_valid: int,
                        sorting_indices: torch.LongTensor) -> Optional[RnnState]:
    """
    Produce the hidden state(s) to feed the wrapped RNN for this call.

    The module is stateful, so the batch size may differ from the one seen
    on the previous call:

    * a larger batch grows ``self._states`` in place, zero-padding the new
      batch entries (note: this *mutates* the stored state);
    * a smaller batch simply slices the stored state down.

    The encoder runs on sequences sorted by length with fully-padded rows
    removed, so before returning, the states are reordered with
    ``sorting_indices`` and truncated to ``num_valid`` rows.

    Parameters
    ----------
    batch_size : ``int``, required.
        Size of the current batch; used to grow or shrink the stored state.
    num_valid : ``int``, required.
        Number of sequences in the batch that are not completely padded.
    sorting_indices : ``torch.LongTensor``, required.
        Permutation that sorts the batch by sequence length.

    Returns
    -------
    ``None`` on the very first call, before any state exists, regardless of
    the module type.  Afterwards: a single tensor of shape
    ``(num_layers, num_valid, state_size)`` for single-state RNNs such as
    GRUs, or, for LSTMs, a tuple of tensors of shapes
    ``(num_layers, num_valid, state_size)`` and
    ``(num_layers, num_valid, memory_size)``.
    """
    # First call ever: let the wrapped module define its initial state.
    if self._states is None:
        return None

    stored_batch_size = self._states[0].size(1)
    if batch_size > stored_batch_size:
        # Grow every stored state tensor with zero rows for the new batch
        # elements.  The zero block is built per tensor because some RNNs
        # have states with different last-dimension sizes.
        extra = batch_size - stored_batch_size
        grown_states = []
        for stored in self._states:
            padding = stored.new_zeros(stored.size(0), extra, stored.size(2))
            grown_states.append(torch.cat([stored, padding], 1))
        self._states = tuple(grown_states)
        correctly_shaped_states = self._states
    elif batch_size < stored_batch_size:
        # Shrink by slicing off the trailing batch entries.
        correctly_shaped_states = tuple(stored[:, :batch_size, :]
                                        for stored in self._states)
    else:
        correctly_shaped_states = self._states

    # Reorder to match the length-sorted batch and drop fully-padded rows
    # before handing the state(s) back.
    if len(self._states) == 1:
        # Single-state RNN (e.g. GRU): unpack and return the bare tensor.
        reordered = correctly_shaped_states[0].index_select(1, sorting_indices)
        return reordered[:, :num_valid, :].contiguous()
    # Multi-state RNN (e.g. LSTM): preserve the (state, memory) tuple.
    reordered = [stored.index_select(1, sorting_indices)
                 for stored in correctly_shaped_states]
    return tuple(state[:, :num_valid, :].contiguous() for state in reordered)
constant[
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
]
if compare[name[self]._states is constant[None]] begin[:]
return[constant[None]]
if compare[name[batch_size] greater[>] call[call[name[self]._states][constant[0]].size, parameter[constant[1]]]] begin[:]
variable[num_states_to_concat] assign[=] binary_operation[name[batch_size] - call[call[name[self]._states][constant[0]].size, parameter[constant[1]]]]
variable[resized_states] assign[=] list[[]]
for taget[name[state]] in starred[name[self]._states] begin[:]
variable[zeros] assign[=] call[name[state].new_zeros, parameter[call[name[state].size, parameter[constant[0]]], name[num_states_to_concat], call[name[state].size, parameter[constant[2]]]]]
call[name[resized_states].append, parameter[call[name[torch].cat, parameter[list[[<ast.Name object at 0x7da2043469e0>, <ast.Name object at 0x7da204344040>]], constant[1]]]]]
name[self]._states assign[=] call[name[tuple], parameter[name[resized_states]]]
variable[correctly_shaped_states] assign[=] name[self]._states
if compare[call[name[len], parameter[name[self]._states]] equal[==] constant[1]] begin[:]
variable[correctly_shaped_state] assign[=] call[name[correctly_shaped_states]][constant[0]]
variable[sorted_state] assign[=] call[name[correctly_shaped_state].index_select, parameter[constant[1], name[sorting_indices]]]
return[call[call[name[sorted_state]][tuple[[<ast.Slice object at 0x7da1b1e15ed0>, <ast.Slice object at 0x7da1b1e17b80>, <ast.Slice object at 0x7da1b1e14100>]]].contiguous, parameter[]]] | keyword[def] identifier[_get_initial_states] ( identifier[self] ,
identifier[batch_size] : identifier[int] ,
identifier[num_valid] : identifier[int] ,
identifier[sorting_indices] : identifier[torch] . identifier[LongTensor] )-> identifier[Optional] [ identifier[RnnState] ]:
literal[string]
keyword[if] identifier[self] . identifier[_states] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[batch_size] > identifier[self] . identifier[_states] [ literal[int] ]. identifier[size] ( literal[int] ):
identifier[num_states_to_concat] = identifier[batch_size] - identifier[self] . identifier[_states] [ literal[int] ]. identifier[size] ( literal[int] )
identifier[resized_states] =[]
keyword[for] identifier[state] keyword[in] identifier[self] . identifier[_states] :
identifier[zeros] = identifier[state] . identifier[new_zeros] ( identifier[state] . identifier[size] ( literal[int] ),
identifier[num_states_to_concat] ,
identifier[state] . identifier[size] ( literal[int] ))
identifier[resized_states] . identifier[append] ( identifier[torch] . identifier[cat] ([ identifier[state] , identifier[zeros] ], literal[int] ))
identifier[self] . identifier[_states] = identifier[tuple] ( identifier[resized_states] )
identifier[correctly_shaped_states] = identifier[self] . identifier[_states]
keyword[elif] identifier[batch_size] < identifier[self] . identifier[_states] [ literal[int] ]. identifier[size] ( literal[int] ):
identifier[correctly_shaped_states] = identifier[tuple] ( identifier[state] [:,: identifier[batch_size] ,:] keyword[for] identifier[state] keyword[in] identifier[self] . identifier[_states] )
keyword[else] :
identifier[correctly_shaped_states] = identifier[self] . identifier[_states]
keyword[if] identifier[len] ( identifier[self] . identifier[_states] )== literal[int] :
identifier[correctly_shaped_state] = identifier[correctly_shaped_states] [ literal[int] ]
identifier[sorted_state] = identifier[correctly_shaped_state] . identifier[index_select] ( literal[int] , identifier[sorting_indices] )
keyword[return] identifier[sorted_state] [:,: identifier[num_valid] ,:]. identifier[contiguous] ()
keyword[else] :
identifier[sorted_states] =[ identifier[state] . identifier[index_select] ( literal[int] , identifier[sorting_indices] )
keyword[for] identifier[state] keyword[in] identifier[correctly_shaped_states] ]
keyword[return] identifier[tuple] ( identifier[state] [:,: identifier[num_valid] ,:]. identifier[contiguous] () keyword[for] identifier[state] keyword[in] identifier[sorted_states] ) | def _get_initial_states(self, batch_size: int, num_valid: int, sorting_indices: torch.LongTensor) -> Optional[RnnState]:
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
"""
# We don't know the state sizes the first time calling forward,
# so we let the module define what it's initial hidden state looks like.
if self._states is None:
return None # depends on [control=['if'], data=[]]
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
# This batch is larger than the all previous states.
# If so, resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.new_zeros(state.size(0), num_states_to_concat, state.size(2))
resized_states.append(torch.cat([state, zeros], 1)) # depends on [control=['for'], data=['state']]
self._states = tuple(resized_states)
correctly_shaped_states = self._states # depends on [control=['if'], data=['batch_size']]
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple((state[:, :batch_size, :] for state in self._states)) # depends on [control=['if'], data=['batch_size']]
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :].contiguous() # depends on [control=['if'], data=[]]
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [state.index_select(1, sorting_indices) for state in correctly_shaped_states]
return tuple((state[:, :num_valid, :].contiguous() for state in sorted_states)) |
def logical_intf_helper(interface):
    """Resolve a logical interface reference to its href, creating it if needed.

    Handy when attaching logical interfaces to inline or capture interfaces.

    :param interface: logical interface name, LogicalInterface instance,
        href string, or None (None falls back to the 'default_eth' interface)
    :return str: href of the logical interface
    """
    if isinstance(interface, LogicalInterface):
        return interface.href
    if interface is None:
        name = 'default_eth'
    elif interface.startswith('http'):
        # Already an href; pass it through unchanged.
        return interface
    else:
        name = interface
    return LogicalInterface.get_or_create(name=name).href
constant[
Logical Interface finder by name. Create if it doesn't exist.
This is useful when adding logical interfaces to for inline
or capture interfaces.
:param interface: logical interface name
:return str href: href of logical interface
]
if compare[name[interface] is constant[None]] begin[:]
return[call[name[LogicalInterface].get_or_create, parameter[]].href]
return[call[name[LogicalInterface].get_or_create, parameter[]].href] | keyword[def] identifier[logical_intf_helper] ( identifier[interface] ):
literal[string]
keyword[if] identifier[interface] keyword[is] keyword[None] :
keyword[return] identifier[LogicalInterface] . identifier[get_or_create] ( identifier[name] = literal[string] ). identifier[href]
keyword[elif] identifier[isinstance] ( identifier[interface] , identifier[LogicalInterface] ):
keyword[return] identifier[interface] . identifier[href]
keyword[elif] identifier[interface] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[interface]
keyword[return] identifier[LogicalInterface] . identifier[get_or_create] ( identifier[name] = identifier[interface] ). identifier[href] | def logical_intf_helper(interface):
"""
Logical Interface finder by name. Create if it doesn't exist.
This is useful when adding logical interfaces to for inline
or capture interfaces.
:param interface: logical interface name
:return str href: href of logical interface
"""
if interface is None:
return LogicalInterface.get_or_create(name='default_eth').href # depends on [control=['if'], data=[]]
elif isinstance(interface, LogicalInterface):
return interface.href # depends on [control=['if'], data=[]]
elif interface.startswith('http'):
return interface # depends on [control=['if'], data=[]]
return LogicalInterface.get_or_create(name=interface).href |
def create_comment(self, text):
    """Create a dashboard comment, mirroring the issue API shape.

    :param text: comment body
    :return: the DashboardComment instance
    """
    comment = DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
    return comment
constant[Mimic issue API, so we can use it everywhere.
Return dashboard comment.
]
return[call[name[DashboardComment].get_or_create, parameter[name[self]._issue_or_pr, name[self]._header, name[text]]]] | keyword[def] identifier[create_comment] ( identifier[self] , identifier[text] ):
literal[string]
keyword[return] identifier[DashboardComment] . identifier[get_or_create] ( identifier[self] . identifier[_issue_or_pr] , identifier[self] . identifier[_header] , identifier[text] ) | def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text) |
def colorize_ansi(msg, color=None, style=None):
    """Wrap a message in ANSI escape codes for terminal coloring.

    :type msg: str or unicode
    :param msg: the message string to colorize
    :type color: str or None
    :param color:
        the color identifier (see `ANSI_COLORS` for available values)
    :type style: str or None
    :param style:
        style string (see `ANSI_COLORS` for available values). Combine
        several effects by separating them with commas.
    :raise KeyError: if an unexistent color or style identifier is given
    :rtype: str or unicode
    :return: the ansi escaped string
    """
    if color is None and style is None:
        # Neither effect requested: hand the text back untouched.
        return msg
    prefix = _get_ansi_code(color, style)
    if not prefix:
        # No usable escape sequence: skip wrapping entirely.
        return msg
    return "%s%s%s" % (prefix, msg, ANSI_RESET)
constant[colorize message by wrapping it with ansi escape codes
:type msg: str or unicode
:param msg: the message string to colorize
:type color: str or None
:param color:
the color identifier (see `ANSI_COLORS` for available values)
:type style: str or None
:param style:
style string (see `ANSI_COLORS` for available values). To get
several style effects at the same time, use a coma as separator.
:raise KeyError: if an unexistent color or style identifier is given
:rtype: str or unicode
:return: the ansi escaped string
]
if <ast.BoolOp object at 0x7da1b0285e40> begin[:]
return[name[msg]]
variable[escape_code] assign[=] call[name[_get_ansi_code], parameter[name[color], name[style]]]
if name[escape_code] begin[:]
return[binary_operation[constant[%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b025fa00>, <ast.Name object at 0x7da1b025fbe0>, <ast.Name object at 0x7da1b025f1f0>]]]]
return[name[msg]] | keyword[def] identifier[colorize_ansi] ( identifier[msg] , identifier[color] = keyword[None] , identifier[style] = keyword[None] ):
literal[string]
keyword[if] identifier[color] keyword[is] keyword[None] keyword[and] identifier[style] keyword[is] keyword[None] :
keyword[return] identifier[msg]
identifier[escape_code] = identifier[_get_ansi_code] ( identifier[color] , identifier[style] )
keyword[if] identifier[escape_code] :
keyword[return] literal[string] %( identifier[escape_code] , identifier[msg] , identifier[ANSI_RESET] )
keyword[return] identifier[msg] | def colorize_ansi(msg, color=None, style=None):
"""colorize message by wrapping it with ansi escape codes
:type msg: str or unicode
:param msg: the message string to colorize
:type color: str or None
:param color:
the color identifier (see `ANSI_COLORS` for available values)
:type style: str or None
:param style:
style string (see `ANSI_COLORS` for available values). To get
several style effects at the same time, use a coma as separator.
:raise KeyError: if an unexistent color or style identifier is given
:rtype: str or unicode
:return: the ansi escaped string
"""
# If both color and style are not defined, then leave the text as is
if color is None and style is None:
return msg # depends on [control=['if'], data=[]]
escape_code = _get_ansi_code(color, style)
# If invalid (or unknown) color, don't wrap msg with ansi codes
if escape_code:
return '%s%s%s' % (escape_code, msg, ANSI_RESET) # depends on [control=['if'], data=[]]
return msg |
def _DeserializeResponse(self, payload):
    """Deserialize a raw batch-part string into a Response.

    Args:
      payload: Header and body string to be deserialized.

    Returns:
      A Response object
    """
    # Peel the HTTP status line off the front of the payload.
    status_line, body = payload.split('\n', 1)
    status = status_line.split(' ', 2)[1]
    # The remainder is RFC 2822-style headers plus content; the email
    # parser splits those apart for us.
    msg = email_parser.Parser().parsestr(body)
    info = dict(msg)
    info['status'] = status
    return http_wrapper.Response(info, msg.get_payload(), self.__batch_url)
constant[Convert string into Response and content.
Args:
payload: Header and body string to be deserialized.
Returns:
A Response object
]
<ast.Tuple object at 0x7da1b085a920> assign[=] call[name[payload].split, parameter[constant[
], constant[1]]]
<ast.Tuple object at 0x7da1b085bc10> assign[=] call[name[status_line].split, parameter[constant[ ], constant[2]]]
variable[parser] assign[=] call[name[email_parser].Parser, parameter[]]
variable[msg] assign[=] call[name[parser].parsestr, parameter[name[payload]]]
variable[info] assign[=] call[name[dict], parameter[name[msg]]]
call[name[info]][constant[status]] assign[=] name[status]
variable[content] assign[=] call[name[msg].get_payload, parameter[]]
return[call[name[http_wrapper].Response, parameter[name[info], name[content], name[self].__batch_url]]] | keyword[def] identifier[_DeserializeResponse] ( identifier[self] , identifier[payload] ):
literal[string]
identifier[status_line] , identifier[payload] = identifier[payload] . identifier[split] ( literal[string] , literal[int] )
identifier[_] , identifier[status] , identifier[_] = identifier[status_line] . identifier[split] ( literal[string] , literal[int] )
identifier[parser] = identifier[email_parser] . identifier[Parser] ()
identifier[msg] = identifier[parser] . identifier[parsestr] ( identifier[payload] )
identifier[info] = identifier[dict] ( identifier[msg] )
identifier[info] [ literal[string] ]= identifier[status]
identifier[content] = identifier[msg] . identifier[get_payload] ()
keyword[return] identifier[http_wrapper] . identifier[Response] ( identifier[info] , identifier[content] , identifier[self] . identifier[__batch_url] ) | def _DeserializeResponse(self, payload):
"""Convert string into Response and content.
Args:
payload: Header and body string to be deserialized.
Returns:
A Response object
"""
# Strip off the status line.
(status_line, payload) = payload.split('\n', 1)
(_, status, _) = status_line.split(' ', 2)
# Parse the rest of the response.
parser = email_parser.Parser()
msg = parser.parsestr(payload)
# Get the headers.
info = dict(msg)
info['status'] = status
# Create Response from the parsed headers.
content = msg.get_payload()
return http_wrapper.Response(info, content, self.__batch_url) |
def parse_contexts(contexts):
    """
    Convert a contexts JSON to an Elasticsearch-compatible list of key-value pairs
    For example, the JSON
    {
      "data": [
        {
          "data": {
            "unique": true
          },
          "schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0"
        },
        {
          "data": {
            "value": 1
          },
          "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"
        },
        {
          "data": {
            "value": 2
          },
          "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"
        }
      ],
      "schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0"
    }
    would become
    [
      ("context_com_acme_duplicated_1", [{"value": 1}, {"value": 2}]),
      ("context_com_acme_unduplicated_1", [{"unique": true}])
    ]

    :param contexts: JSON string holding the self-describing contexts envelope
    :return: list of (fixed schema name, list of context data dicts) tuples
    """
    data = json.loads(contexts)['data']
    distinct_contexts = {}
    for context in data:
        schema = fix_schema("contexts", context['schema'])
        # setdefault collapses the "first occurrence vs. append" branching
        # of a manual membership check into a single grouping step.
        distinct_contexts.setdefault(schema, []).append(context['data'])
    # Dicts preserve insertion order, so schemas come out in first-seen
    # order, matching the original key-iteration behavior.
    return list(distinct_contexts.items())
constant[
Convert a contexts JSON to an Elasticsearch-compatible list of key-value pairs
For example, the JSON
{
"data": [
{
"data": {
"unique": true
},
"schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0"
},
{
"data": {
"value": 1
},
"schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"
},
{
"data": {
"value": 2
},
"schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"
}
],
"schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0"
}
would become
[
("context_com_acme_duplicated_1", [{"value": 1}, {"value": 2}]),
("context_com_acme_unduplicated_1", [{"unique": true}])
]
]
variable[my_json] assign[=] call[name[json].loads, parameter[name[contexts]]]
variable[data] assign[=] call[name[my_json]][constant[data]]
variable[distinct_contexts] assign[=] dictionary[[], []]
for taget[name[context]] in starred[name[data]] begin[:]
variable[schema] assign[=] call[name[fix_schema], parameter[constant[contexts], call[name[context]][constant[schema]]]]
variable[inner_data] assign[=] call[name[context]][constant[data]]
if compare[name[schema] <ast.NotIn object at 0x7da2590d7190> name[distinct_contexts]] begin[:]
call[name[distinct_contexts]][name[schema]] assign[=] list[[<ast.Name object at 0x7da1b02c0bb0>]]
variable[output] assign[=] list[[]]
for taget[name[key]] in starred[name[distinct_contexts]] begin[:]
call[name[output].append, parameter[tuple[[<ast.Name object at 0x7da1b02c0eb0>, <ast.Subscript object at 0x7da1b02c0160>]]]]
return[name[output]] | keyword[def] identifier[parse_contexts] ( identifier[contexts] ):
literal[string]
identifier[my_json] = identifier[json] . identifier[loads] ( identifier[contexts] )
identifier[data] = identifier[my_json] [ literal[string] ]
identifier[distinct_contexts] ={}
keyword[for] identifier[context] keyword[in] identifier[data] :
identifier[schema] = identifier[fix_schema] ( literal[string] , identifier[context] [ literal[string] ])
identifier[inner_data] = identifier[context] [ literal[string] ]
keyword[if] identifier[schema] keyword[not] keyword[in] identifier[distinct_contexts] :
identifier[distinct_contexts] [ identifier[schema] ]=[ identifier[inner_data] ]
keyword[else] :
identifier[distinct_contexts] [ identifier[schema] ]. identifier[append] ( identifier[inner_data] )
identifier[output] =[]
keyword[for] identifier[key] keyword[in] identifier[distinct_contexts] :
identifier[output] . identifier[append] (( identifier[key] , identifier[distinct_contexts] [ identifier[key] ]))
keyword[return] identifier[output] | def parse_contexts(contexts):
"""
Convert a contexts JSON to an Elasticsearch-compatible list of key-value pairs
For example, the JSON
{
"data": [
{
"data": {
"unique": true
},
"schema": "iglu:com.acme/unduplicated/jsonschema/1-0-0"
},
{
"data": {
"value": 1
},
"schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"
},
{
"data": {
"value": 2
},
"schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"
}
],
"schema": "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0"
}
would become
[
("context_com_acme_duplicated_1", [{"value": 1}, {"value": 2}]),
("context_com_acme_unduplicated_1", [{"unique": true}])
]
"""
my_json = json.loads(contexts)
data = my_json['data']
distinct_contexts = {}
for context in data:
schema = fix_schema('contexts', context['schema'])
inner_data = context['data']
if schema not in distinct_contexts:
distinct_contexts[schema] = [inner_data] # depends on [control=['if'], data=['schema', 'distinct_contexts']]
else:
distinct_contexts[schema].append(inner_data) # depends on [control=['for'], data=['context']]
output = []
for key in distinct_contexts:
output.append((key, distinct_contexts[key])) # depends on [control=['for'], data=['key']]
return output |
def _action_get(self, ids):
    """Get the details for ids
        Parameters
        ----------
        ids : {list, set, tuple, generator} of str
            The IDs to get
        Notes
        -----
        If ids is empty, then all IDs are returned.
        Returns
        -------
        list of dict
            The details of the jobs
        """
    if not ids:
        # No IDs requested: fall back to every known job ID.
        ids = self.jobs
    result = []
    # Use a set as a work queue so duplicate IDs are fetched only once;
    # group expansion below may add more IDs while we iterate.
    ids = set(ids)
    while ids:
        id_ = ids.pop()
        if id_ is None:
            continue
        try:
            payload = r_client.get(id_)
        except ResponseError:
            # wrong key type
            continue
        try:
            payload = self._decode(payload)
        except ValueError:
            # unable to decode or data doesn't exist in redis
            continue
        else:
            result.append(payload)
            if payload['type'] == 'group':
                # Groups are expanded: queue each member ID for retrieval too.
                for obj in self.traverse(id_):
                    ids.add(obj['id'])
    return result
constant[Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
]
if <ast.UnaryOp object at 0x7da20c76f850> begin[:]
variable[ids] assign[=] name[self].jobs
variable[result] assign[=] list[[]]
variable[ids] assign[=] call[name[set], parameter[name[ids]]]
while name[ids] begin[:]
variable[id_] assign[=] call[name[ids].pop, parameter[]]
if compare[name[id_] is constant[None]] begin[:]
continue
<ast.Try object at 0x7da207f981f0>
<ast.Try object at 0x7da207f98640>
if compare[call[name[payload]][constant[type]] equal[==] constant[group]] begin[:]
for taget[name[obj]] in starred[call[name[self].traverse, parameter[name[id_]]]] begin[:]
call[name[ids].add, parameter[call[name[obj]][constant[id]]]]
return[name[result]] | keyword[def] identifier[_action_get] ( identifier[self] , identifier[ids] ):
literal[string]
keyword[if] keyword[not] identifier[ids] :
identifier[ids] = identifier[self] . identifier[jobs]
identifier[result] =[]
identifier[ids] = identifier[set] ( identifier[ids] )
keyword[while] identifier[ids] :
identifier[id_] = identifier[ids] . identifier[pop] ()
keyword[if] identifier[id_] keyword[is] keyword[None] :
keyword[continue]
keyword[try] :
identifier[payload] = identifier[r_client] . identifier[get] ( identifier[id_] )
keyword[except] identifier[ResponseError] :
keyword[continue]
keyword[try] :
identifier[payload] = identifier[self] . identifier[_decode] ( identifier[payload] )
keyword[except] identifier[ValueError] :
keyword[continue]
keyword[else] :
identifier[result] . identifier[append] ( identifier[payload] )
keyword[if] identifier[payload] [ literal[string] ]== literal[string] :
keyword[for] identifier[obj] keyword[in] identifier[self] . identifier[traverse] ( identifier[id_] ):
identifier[ids] . identifier[add] ( identifier[obj] [ literal[string] ])
keyword[return] identifier[result] | def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs # depends on [control=['if'], data=[]]
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue # depends on [control=['if'], data=[]]
try:
payload = r_client.get(id_) # depends on [control=['try'], data=[]]
except ResponseError:
# wrong key type
continue # depends on [control=['except'], data=[]]
try:
payload = self._decode(payload) # depends on [control=['try'], data=[]]
except ValueError:
# unable to decode or data doesn't exist in redis
continue # depends on [control=['except'], data=[]]
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id']) # depends on [control=['for'], data=['obj']] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return result |
def universal_transformer_with_gru_as_transition_function(
    layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
  """Universal Transformer which uses a gru as transition function.
  It's kind of like having a gru, filliped vertically next to the Universal
  Transformer that controls the flow of the information in depth,
  over different steps of the Universal Transformer.
  Args:
    layer_inputs:
        - state: state
        - inputs: not used here
        - memory: not used here
    step: indicates number of steps taken so far
    hparams: model hyper-parameters.
    ffn_unit: feed-forward unit
    attention_unit: multi-head attention unit
    pad_remover: to mask out padding in convolutional layers (efficiency).
  Returns:
    layer_output:
        new_state: new state
        inputs: not uesed
        memory: not used
  """
  state, unused_inputs, unused_memory = tf.unstack(
      layer_inputs, num=None, axis=0, name="unstack")
  # state (ut_state): output of the gru in the previous step
  # Multi_head_attention:
  assert not hparams.add_step_timing_signal  # Let gru count for us!
  mh_attention_input = step_preprocess(state, step, hparams)
  transition_function_input = attention_unit(mh_attention_input)
  # Transition Function:
  if hparams.add_ffn_unit_to_the_transition_function:
    transition_function_input = ffn_unit(transition_function_input)
  transition_function_input = common_layers.layer_preprocess(
      transition_function_input, hparams)
  with tf.variable_scope("gru"):
    # gru update gate: z_t = sigmoid(W_z.x_t + U_z.h_{t-1})
    # Bias initialized to 1.0 so the gate starts mostly "open".
    transition_function_update_gate = _ffn_layer_multi_inputs(
        [transition_function_input, state],
        hparams,
        name="update",
        bias_initializer=tf.constant_initializer(1.0),
        activation=tf.sigmoid,
        pad_remover=pad_remover,
        preprocess=False,
        postprocess=False)
    tf.contrib.summary.scalar("gru_update_gate",
                              tf.reduce_mean(transition_function_update_gate))
    # gru reset gate: r_t = sigmoid(W_r.x_t + U_r.h_{t-1})
    transition_function_reset_gate = _ffn_layer_multi_inputs(
        [transition_function_input, state],
        hparams,
        name="reset",
        bias_initializer=tf.constant_initializer(1.0),
        activation=tf.sigmoid,
        pad_remover=pad_remover,
        preprocess=False,
        postprocess=False)
    tf.contrib.summary.scalar("gru_reset_gate",
                              tf.reduce_mean(transition_function_reset_gate))
    reset_state = transition_function_reset_gate * state
    # gru_candidate_activation: h' = tanh(W_{x_t} + U (r_t h_{t-1})
    transition_function_candidate = _ffn_layer_multi_inputs(
        [transition_function_input, reset_state],
        hparams,
        name="candidate",
        bias_initializer=tf.zeros_initializer(),
        activation=tf.tanh,
        pad_remover=pad_remover,
        preprocess=False,
        postprocess=False)
    # gru output: h_t = (1 - z_t) * x_t + z_t * h'
    # NOTE(review): interpolates with the attention output x_t rather than
    # the previous state h_{t-1} as in a textbook GRU.
    transition_function_output = (
        (1 - transition_function_update_gate) * transition_function_input +
        transition_function_update_gate * transition_function_candidate)
  transition_function_output = common_layers.layer_preprocess(
      transition_function_output, hparams)
  return transition_function_output, unused_inputs, unused_memory
constant[Universal Transformer which uses a gru as transition function.
It's kind of like having a gru, filliped vertically next to the Universal
Transformer that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: not used here
- memory: not used here
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: not uesed
memory: not used
]
<ast.Tuple object at 0x7da1b20e74f0> assign[=] call[name[tf].unstack, parameter[name[layer_inputs]]]
assert[<ast.UnaryOp object at 0x7da1b20e4190>]
variable[mh_attention_input] assign[=] call[name[step_preprocess], parameter[name[state], name[step], name[hparams]]]
variable[transition_function_input] assign[=] call[name[attention_unit], parameter[name[mh_attention_input]]]
if name[hparams].add_ffn_unit_to_the_transition_function begin[:]
variable[transition_function_input] assign[=] call[name[ffn_unit], parameter[name[transition_function_input]]]
variable[transition_function_input] assign[=] call[name[common_layers].layer_preprocess, parameter[name[transition_function_input], name[hparams]]]
with call[name[tf].variable_scope, parameter[constant[gru]]] begin[:]
variable[transition_function_update_gate] assign[=] call[name[_ffn_layer_multi_inputs], parameter[list[[<ast.Name object at 0x7da1b20e5db0>, <ast.Name object at 0x7da1b20e7ca0>]], name[hparams]]]
call[name[tf].contrib.summary.scalar, parameter[constant[gru_update_gate], call[name[tf].reduce_mean, parameter[name[transition_function_update_gate]]]]]
variable[transition_function_reset_gate] assign[=] call[name[_ffn_layer_multi_inputs], parameter[list[[<ast.Name object at 0x7da1b20e6c80>, <ast.Name object at 0x7da1b20e4cd0>]], name[hparams]]]
call[name[tf].contrib.summary.scalar, parameter[constant[gru_reset_gate], call[name[tf].reduce_mean, parameter[name[transition_function_reset_gate]]]]]
variable[reset_state] assign[=] binary_operation[name[transition_function_reset_gate] * name[state]]
variable[transition_function_candidate] assign[=] call[name[_ffn_layer_multi_inputs], parameter[list[[<ast.Name object at 0x7da1b20e5780>, <ast.Name object at 0x7da1b20e4280>]], name[hparams]]]
variable[transition_function_output] assign[=] binary_operation[binary_operation[binary_operation[constant[1] - name[transition_function_update_gate]] * name[transition_function_input]] + binary_operation[name[transition_function_update_gate] * name[transition_function_candidate]]]
variable[transition_function_output] assign[=] call[name[common_layers].layer_preprocess, parameter[name[transition_function_output], name[hparams]]]
return[tuple[[<ast.Name object at 0x7da1b205b5b0>, <ast.Name object at 0x7da1b2059cf0>, <ast.Name object at 0x7da1b205b5e0>]]] | keyword[def] identifier[universal_transformer_with_gru_as_transition_function] (
identifier[layer_inputs] , identifier[step] , identifier[hparams] , identifier[ffn_unit] , identifier[attention_unit] , identifier[pad_remover] = keyword[None] ):
literal[string]
identifier[state] , identifier[unused_inputs] , identifier[unused_memory] = identifier[tf] . identifier[unstack] (
identifier[layer_inputs] , identifier[num] = keyword[None] , identifier[axis] = literal[int] , identifier[name] = literal[string] )
keyword[assert] keyword[not] identifier[hparams] . identifier[add_step_timing_signal]
identifier[mh_attention_input] = identifier[step_preprocess] ( identifier[state] , identifier[step] , identifier[hparams] )
identifier[transition_function_input] = identifier[attention_unit] ( identifier[mh_attention_input] )
keyword[if] identifier[hparams] . identifier[add_ffn_unit_to_the_transition_function] :
identifier[transition_function_input] = identifier[ffn_unit] ( identifier[transition_function_input] )
identifier[transition_function_input] = identifier[common_layers] . identifier[layer_preprocess] (
identifier[transition_function_input] , identifier[hparams] )
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] ):
identifier[transition_function_update_gate] = identifier[_ffn_layer_multi_inputs] (
[ identifier[transition_function_input] , identifier[state] ],
identifier[hparams] ,
identifier[name] = literal[string] ,
identifier[bias_initializer] = identifier[tf] . identifier[constant_initializer] ( literal[int] ),
identifier[activation] = identifier[tf] . identifier[sigmoid] ,
identifier[pad_remover] = identifier[pad_remover] ,
identifier[preprocess] = keyword[False] ,
identifier[postprocess] = keyword[False] )
identifier[tf] . identifier[contrib] . identifier[summary] . identifier[scalar] ( literal[string] ,
identifier[tf] . identifier[reduce_mean] ( identifier[transition_function_update_gate] ))
identifier[transition_function_reset_gate] = identifier[_ffn_layer_multi_inputs] (
[ identifier[transition_function_input] , identifier[state] ],
identifier[hparams] ,
identifier[name] = literal[string] ,
identifier[bias_initializer] = identifier[tf] . identifier[constant_initializer] ( literal[int] ),
identifier[activation] = identifier[tf] . identifier[sigmoid] ,
identifier[pad_remover] = identifier[pad_remover] ,
identifier[preprocess] = keyword[False] ,
identifier[postprocess] = keyword[False] )
identifier[tf] . identifier[contrib] . identifier[summary] . identifier[scalar] ( literal[string] ,
identifier[tf] . identifier[reduce_mean] ( identifier[transition_function_reset_gate] ))
identifier[reset_state] = identifier[transition_function_reset_gate] * identifier[state]
identifier[transition_function_candidate] = identifier[_ffn_layer_multi_inputs] (
[ identifier[transition_function_input] , identifier[reset_state] ],
identifier[hparams] ,
identifier[name] = literal[string] ,
identifier[bias_initializer] = identifier[tf] . identifier[zeros_initializer] (),
identifier[activation] = identifier[tf] . identifier[tanh] ,
identifier[pad_remover] = identifier[pad_remover] ,
identifier[preprocess] = keyword[False] ,
identifier[postprocess] = keyword[False] )
identifier[transition_function_output] =(
( literal[int] - identifier[transition_function_update_gate] )* identifier[transition_function_input] +
identifier[transition_function_update_gate] * identifier[transition_function_candidate] )
identifier[transition_function_output] = identifier[common_layers] . identifier[layer_preprocess] (
identifier[transition_function_output] , identifier[hparams] )
keyword[return] identifier[transition_function_output] , identifier[unused_inputs] , identifier[unused_memory] | def universal_transformer_with_gru_as_transition_function(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
"""Universal Transformer which uses a gru as transition function.
It's kind of like having a gru, filliped vertically next to the Universal
Transformer that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: not used here
- memory: not used here
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: not uesed
memory: not used
"""
(state, unused_inputs, unused_memory) = tf.unstack(layer_inputs, num=None, axis=0, name='unstack')
# state (ut_state): output of the gru in the previous step
# Multi_head_attention:
assert not hparams.add_step_timing_signal # Let gru count for us!
mh_attention_input = step_preprocess(state, step, hparams)
transition_function_input = attention_unit(mh_attention_input)
# Transition Function:
if hparams.add_ffn_unit_to_the_transition_function:
transition_function_input = ffn_unit(transition_function_input) # depends on [control=['if'], data=[]]
transition_function_input = common_layers.layer_preprocess(transition_function_input, hparams)
with tf.variable_scope('gru'):
# gru update gate: z_t = sigmoid(W_z.x_t + U_z.h_{t-1})
transition_function_update_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='update', bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)
tf.contrib.summary.scalar('gru_update_gate', tf.reduce_mean(transition_function_update_gate))
# gru reset gate: r_t = sigmoid(W_r.x_t + U_r.h_{t-1})
transition_function_reset_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='reset', bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)
tf.contrib.summary.scalar('gru_reset_gate', tf.reduce_mean(transition_function_reset_gate))
reset_state = transition_function_reset_gate * state
# gru_candidate_activation: h' = tanh(W_{x_t} + U (r_t h_{t-1})
transition_function_candidate = _ffn_layer_multi_inputs([transition_function_input, reset_state], hparams, name='candidate', bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False)
transition_function_output = (1 - transition_function_update_gate) * transition_function_input + transition_function_update_gate * transition_function_candidate # depends on [control=['with'], data=[]]
transition_function_output = common_layers.layer_preprocess(transition_function_output, hparams)
return (transition_function_output, unused_inputs, unused_memory) |
async def _assert_link_secret(self, action: str) -> str:
    """
    Return current wallet link secret label. Raise AbsentLinkSecret if link secret is not set.

    :param action: action requiring link secret
    :return: link secret label
    """
    label = await self.wallet.get_link_secret_label()
    if label is not None:
        return label
    LOGGER.debug(
        'HolderProver._assert_link_secret: action %s requires link secret but it is not set',
        action)
    raise AbsentLinkSecret('Action {} requires link secret but it is not set'.format(action))
literal[string]
identifier[rv] = keyword[await] identifier[self] . identifier[wallet] . identifier[get_link_secret_label] ()
keyword[if] identifier[rv] keyword[is] keyword[None] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[action] )
keyword[raise] identifier[AbsentLinkSecret] ( literal[string] . identifier[format] ( identifier[action] ))
keyword[return] identifier[rv] | async def _assert_link_secret(self, action: str) -> str:
"""
Return current wallet link secret label. Raise AbsentLinkSecret if link secret is not set.
:param action: action requiring link secret
"""
rv = await self.wallet.get_link_secret_label()
if rv is None:
LOGGER.debug('HolderProver._assert_link_secret: action %s requires link secret but it is not set', action)
raise AbsentLinkSecret('Action {} requires link secret but it is not set'.format(action)) # depends on [control=['if'], data=[]]
return rv |
def get_collection_endpoint(cls):
    """
    Get the relative path to the API resource collection
    If self.collection_endpoint is not set, it will default to the lowercase name of the resource class plus an "s" and the terminating "/"
    :param cls: Resource class
    :return: Relative path to the resource collection
    """
    endpoint = cls.Meta.collection_endpoint
    if endpoint is not None:
        return endpoint
    # Fallback: pluralized lowercase class name, e.g. Widget -> "widgets/"
    return cls.__name__.lower() + "s/"
constant[
Get the relative path to the API resource collection
If self.collection_endpoint is not set, it will default to the lowercase name of the resource class plus an "s" and the terminating "/"
:param cls: Resource class
:return: Relative path to the resource collection
]
return[<ast.IfExp object at 0x7da20e9b3760>] | keyword[def] identifier[get_collection_endpoint] ( identifier[cls] ):
literal[string]
keyword[return] identifier[cls] . identifier[Meta] . identifier[collection_endpoint] keyword[if] identifier[cls] . identifier[Meta] . identifier[collection_endpoint] keyword[is] keyword[not] keyword[None] keyword[else] identifier[cls] . identifier[__name__] . identifier[lower] ()+ literal[string] | def get_collection_endpoint(cls):
"""
Get the relative path to the API resource collection
If self.collection_endpoint is not set, it will default to the lowercase name of the resource class plus an "s" and the terminating "/"
:param cls: Resource class
:return: Relative path to the resource collection
"""
return cls.Meta.collection_endpoint if cls.Meta.collection_endpoint is not None else cls.__name__.lower() + 's/' |
def do_not_disturb(self, enabled):
    """Set do not disturb."""
    # The backend expects the string "true"/"false", not a Python bool.
    value = str(enabled).lower()
    self._set_setting({CONST.SETTINGS_DO_NOT_DISTURB: value})
constant[Set do not disturb.]
call[name[self]._set_setting, parameter[dictionary[[<ast.Attribute object at 0x7da207f00460>], [<ast.Call object at 0x7da207f02020>]]]] | keyword[def] identifier[do_not_disturb] ( identifier[self] , identifier[enabled] ):
literal[string]
identifier[self] . identifier[_set_setting] (
{
identifier[CONST] . identifier[SETTINGS_DO_NOT_DISTURB] : identifier[str] ( identifier[enabled] ). identifier[lower] ()
}) | def do_not_disturb(self, enabled):
"""Set do not disturb."""
self._set_setting({CONST.SETTINGS_DO_NOT_DISTURB: str(enabled).lower()}) |
def record(self):
    # type: () -> bytes
    '''
    Generate a string representing the Rock Ridge System Terminator
    record.

    Parameters:
     None.
    Returns:
     String containing the Rock Ridge record.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('ST record not yet initialized!')
    # Signature 'ST' followed by one-byte length and SUSP entry version.
    payload = struct.pack('=BB', RRSTRecord.length(), SU_ENTRY_VERSION)
    return b'ST' + payload
constant[
Generate a string representing the Rock Ridge System Terminator
record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
]
if <ast.UnaryOp object at 0x7da18f7224d0> begin[:]
<ast.Raise object at 0x7da18f7207f0>
return[binary_operation[constant[b'ST'] + call[name[struct].pack, parameter[constant[=BB], call[name[RRSTRecord].length, parameter[]], name[SU_ENTRY_VERSION]]]]] | keyword[def] identifier[record] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_initialized] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
keyword[return] literal[string] + identifier[struct] . identifier[pack] ( literal[string] , identifier[RRSTRecord] . identifier[length] (), identifier[SU_ENTRY_VERSION] ) | def record(self):
# type: () -> bytes
'\n Generate a string representing the Rock Ridge System Terminator\n record.\n\n Parameters:\n None.\n Returns:\n String containing the Rock Ridge record.\n '
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('ST record not yet initialized!') # depends on [control=['if'], data=[]]
return b'ST' + struct.pack('=BB', RRSTRecord.length(), SU_ENTRY_VERSION) |
def notify(title, message, **kwargs):
    """
    This backend automatically selects the correct desktop notification backend
    for your operating system.
    """
    for prefix in ('linux', 'win32', 'darwin'):
        if not platform.startswith(prefix):
            continue
        backend = import_module('ntfy.backends.{}'.format(prefix))
        try:
            backend.notify(title=title, message=message, **kwargs)
        except Exception as exc:
            # Wrap any backend failure so callers see a uniform error type.
            raise DefaultNotifierError(exc, backend)
        break
constant[
This backend automatically selects the correct desktop notification backend
for your operating system.
]
for taget[name[os]] in starred[list[[<ast.Constant object at 0x7da1b20be6e0>, <ast.Constant object at 0x7da1b20bf7f0>, <ast.Constant object at 0x7da1b1d536d0>]]] begin[:]
if call[name[platform].startswith, parameter[name[os]]] begin[:]
variable[module] assign[=] call[name[import_module], parameter[call[constant[ntfy.backends.{}].format, parameter[name[os]]]]]
<ast.Try object at 0x7da1b1d51c00>
break | keyword[def] identifier[notify] ( identifier[title] , identifier[message] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[os] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[platform] . identifier[startswith] ( identifier[os] ):
identifier[module] = identifier[import_module] ( literal[string] . identifier[format] ( identifier[os] ))
keyword[try] :
identifier[module] . identifier[notify] ( identifier[title] = identifier[title] , identifier[message] = identifier[message] ,** identifier[kwargs] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[DefaultNotifierError] ( identifier[e] , identifier[module] )
keyword[break] | def notify(title, message, **kwargs):
"""
This backend automatically selects the correct desktop notification backend
for your operating system.
"""
for os in ['linux', 'win32', 'darwin']:
if platform.startswith(os):
module = import_module('ntfy.backends.{}'.format(os))
try:
module.notify(title=title, message=message, **kwargs) # depends on [control=['try'], data=[]]
except Exception as e:
raise DefaultNotifierError(e, module) # depends on [control=['except'], data=['e']]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['os']] |
def Add(self, other):
    """Add other to self pointwise.

    Requires that both self and other are of the same length, and contain
    identical timestamps. Typically this means that Normalize has been called
    on both with identical time parameters.

    Args:
      other: The sequence to add to self.

    Raises:
      RuntimeError: other does not contain the same timestamps as self.
    """
    if len(self.data) != len(other.data):
        raise RuntimeError("Can only add series of identical lengths.")
    for mine, theirs in zip(self.data, other.data):
        # Entries are [value, timestamp]; timestamps must line up exactly.
        if mine[1] != theirs[1]:
            raise RuntimeError("Timestamp mismatch.")
        if mine[0] is None and theirs[0] is None:
            continue
        # A lone None counts as zero so one-sided data still sums.
        mine[0] = (mine[0] or 0) + (theirs[0] or 0)
constant[Add other to self pointwise.
Requires that both self and other are of the same length, and contain
identical timestamps. Typically this means that Normalize has been called
on both with identical time parameters.
Args:
other: The sequence to add to self.
Raises:
RuntimeError: other does not contain the same timestamps as self.
]
if compare[call[name[len], parameter[name[self].data]] not_equal[!=] call[name[len], parameter[name[other].data]]] begin[:]
<ast.Raise object at 0x7da1b1b445e0>
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].data]]]]] begin[:]
if compare[call[call[name[self].data][name[i]]][constant[1]] not_equal[!=] call[call[name[other].data][name[i]]][constant[1]]] begin[:]
<ast.Raise object at 0x7da1b1b47d00>
if <ast.BoolOp object at 0x7da1b1b44430> begin[:]
continue
call[call[name[self].data][name[i]]][constant[0]] assign[=] binary_operation[<ast.BoolOp object at 0x7da1b1b464d0> + <ast.BoolOp object at 0x7da1b1b47970>] | keyword[def] identifier[Add] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[data] )!= identifier[len] ( identifier[other] . identifier[data] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[data] )):
keyword[if] identifier[self] . identifier[data] [ identifier[i] ][ literal[int] ]!= identifier[other] . identifier[data] [ identifier[i] ][ literal[int] ]:
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[self] . identifier[data] [ identifier[i] ][ literal[int] ] keyword[is] keyword[None] keyword[and] identifier[other] . identifier[data] [ identifier[i] ][ literal[int] ] keyword[is] keyword[None] :
keyword[continue]
identifier[self] . identifier[data] [ identifier[i] ][ literal[int] ]=( identifier[self] . identifier[data] [ identifier[i] ][ literal[int] ] keyword[or] literal[int] )+( identifier[other] . identifier[data] [ identifier[i] ][ literal[int] ] keyword[or] literal[int] ) | def Add(self, other):
"""Add other to self pointwise.
Requires that both self and other are of the same length, and contain
identical timestamps. Typically this means that Normalize has been called
on both with identical time parameters.
Args:
other: The sequence to add to self.
Raises:
RuntimeError: other does not contain the same timestamps as self.
"""
if len(self.data) != len(other.data):
raise RuntimeError('Can only add series of identical lengths.') # depends on [control=['if'], data=[]]
for i in range(len(self.data)):
if self.data[i][1] != other.data[i][1]:
raise RuntimeError('Timestamp mismatch.') # depends on [control=['if'], data=[]]
if self.data[i][0] is None and other.data[i][0] is None:
continue # depends on [control=['if'], data=[]]
self.data[i][0] = (self.data[i][0] or 0) + (other.data[i][0] or 0) # depends on [control=['for'], data=['i']] |
def jquery_update_text(self, selector, new_value, by=By.CSS_SELECTOR,
                       timeout=settings.LARGE_TIMEOUT):
    """ The shorter version of self.jquery_update_text_value()
        (The longer version remains for backwards compatibility.) """
    # Only scale the default timeout; an explicit caller timeout is kept.
    if timeout == settings.LARGE_TIMEOUT and self.timeout_multiplier:
        timeout = self.__get_new_timeout(timeout)
    self.jquery_update_text_value(
        selector, new_value, by=by, timeout=timeout)
constant[ The shorter version of self.jquery_update_text_value()
(The longer version remains for backwards compatibility.) ]
if <ast.BoolOp object at 0x7da1b1bfba60> begin[:]
variable[timeout] assign[=] call[name[self].__get_new_timeout, parameter[name[timeout]]]
call[name[self].jquery_update_text_value, parameter[name[selector], name[new_value]]] | keyword[def] identifier[jquery_update_text] ( identifier[self] , identifier[selector] , identifier[new_value] , identifier[by] = identifier[By] . identifier[CSS_SELECTOR] ,
identifier[timeout] = identifier[settings] . identifier[LARGE_TIMEOUT] ):
literal[string]
keyword[if] identifier[self] . identifier[timeout_multiplier] keyword[and] identifier[timeout] == identifier[settings] . identifier[LARGE_TIMEOUT] :
identifier[timeout] = identifier[self] . identifier[__get_new_timeout] ( identifier[timeout] )
identifier[self] . identifier[jquery_update_text_value] (
identifier[selector] , identifier[new_value] , identifier[by] = identifier[by] , identifier[timeout] = identifier[timeout] ) | def jquery_update_text(self, selector, new_value, by=By.CSS_SELECTOR, timeout=settings.LARGE_TIMEOUT):
""" The shorter version of self.jquery_update_text_value()
(The longer version remains for backwards compatibility.) """
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout) # depends on [control=['if'], data=[]]
self.jquery_update_text_value(selector, new_value, by=by, timeout=timeout) |
def cleanup_a_alpha_and_derivatives(self):
    r'''Removes properties set by `setup_a_alpha_and_derivatives`; run by
    `GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for
    every component'''
    # Drop the per-component scratch attributes one by one.
    del self.a
    del self.Tc
    del self.S1
    del self.S2
constant[Removes properties set by `setup_a_alpha_and_derivatives`; run by
`GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for
every component]
<ast.Delete object at 0x7da18eb55750> | keyword[def] identifier[cleanup_a_alpha_and_derivatives] ( identifier[self] ):
literal[string]
keyword[del] ( identifier[self] . identifier[a] , identifier[self] . identifier[Tc] , identifier[self] . identifier[S1] , identifier[self] . identifier[S2] ) | def cleanup_a_alpha_and_derivatives(self):
"""Removes properties set by `setup_a_alpha_and_derivatives`; run by
`GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for
every component"""
del (self.a, self.Tc, self.S1, self.S2) |
def check(self):
    """
    Implements user message checking for views.

    Checks if the current request has an explicit "ignore-message"
    parameter (a GUID) pointing to a message with identical text from a
    previous request, in which case further processing is allowed.
    """
    request = get_current_request()
    ignore_guid = request.params.get('ignore-message')
    messages = request.root['_messages']
    if not ignore_guid:
        return False
    candidate = messages.get(ignore_guid)
    # Vote True only when the referenced message exists and matches exactly.
    return candidate is not None and candidate.text == self.message.text
constant[
Implements user message checking for views.
Checks if the current request has an explicit "ignore-message"
parameter (a GUID) pointing to a message with identical text from a
previous request, in which case further processing is allowed.
]
variable[request] assign[=] call[name[get_current_request], parameter[]]
variable[ignore_guid] assign[=] call[name[request].params.get, parameter[constant[ignore-message]]]
variable[coll] assign[=] call[name[request].root][constant[_messages]]
variable[vote] assign[=] constant[False]
if name[ignore_guid] begin[:]
variable[ignore_mb] assign[=] call[name[coll].get, parameter[name[ignore_guid]]]
if <ast.BoolOp object at 0x7da2041dab90> begin[:]
variable[vote] assign[=] constant[True]
return[name[vote]] | keyword[def] identifier[check] ( identifier[self] ):
literal[string]
identifier[request] = identifier[get_current_request] ()
identifier[ignore_guid] = identifier[request] . identifier[params] . identifier[get] ( literal[string] )
identifier[coll] = identifier[request] . identifier[root] [ literal[string] ]
identifier[vote] = keyword[False]
keyword[if] identifier[ignore_guid] :
identifier[ignore_mb] = identifier[coll] . identifier[get] ( identifier[ignore_guid] )
keyword[if] keyword[not] identifier[ignore_mb] keyword[is] keyword[None] keyword[and] identifier[ignore_mb] . identifier[text] == identifier[self] . identifier[message] . identifier[text] :
identifier[vote] = keyword[True]
keyword[return] identifier[vote] | def check(self):
"""
Implements user message checking for views.
Checks if the current request has an explicit "ignore-message"
parameter (a GUID) pointing to a message with identical text from a
previous request, in which case further processing is allowed.
"""
request = get_current_request()
ignore_guid = request.params.get('ignore-message')
coll = request.root['_messages']
vote = False
if ignore_guid:
ignore_mb = coll.get(ignore_guid)
if not ignore_mb is None and ignore_mb.text == self.message.text:
vote = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return vote |
def reset(project, user):
    """Reset system, delete all output files and prepare for a new run"""
    base = Project.path(project, user)
    output_dir = base + "output"
    if not os.path.isdir(output_dir):
        raise flask.abort(404)
    # Wipe and recreate the output directory for a clean run.
    shutil.rmtree(output_dir)
    os.makedirs(output_dir)
    # Remove completion/status markers from any previous run.
    for marker in (".done", ".status"):
        marker_path = base + marker
        if os.path.exists(marker_path):
            os.unlink(marker_path)
constant[Reset system, delete all output files and prepare for a new run]
variable[d] assign[=] binary_operation[call[name[Project].path, parameter[name[project], name[user]]] + constant[output]]
if call[name[os].path.isdir, parameter[name[d]]] begin[:]
call[name[shutil].rmtree, parameter[name[d]]]
call[name[os].makedirs, parameter[name[d]]]
if call[name[os].path.exists, parameter[binary_operation[call[name[Project].path, parameter[name[project], name[user]]] + constant[.done]]]] begin[:]
call[name[os].unlink, parameter[binary_operation[call[name[Project].path, parameter[name[project], name[user]]] + constant[.done]]]]
if call[name[os].path.exists, parameter[binary_operation[call[name[Project].path, parameter[name[project], name[user]]] + constant[.status]]]] begin[:]
call[name[os].unlink, parameter[binary_operation[call[name[Project].path, parameter[name[project], name[user]]] + constant[.status]]]] | keyword[def] identifier[reset] ( identifier[project] , identifier[user] ):
literal[string]
identifier[d] = identifier[Project] . identifier[path] ( identifier[project] , identifier[user] )+ literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[d] ):
identifier[shutil] . identifier[rmtree] ( identifier[d] )
identifier[os] . identifier[makedirs] ( identifier[d] )
keyword[else] :
keyword[raise] identifier[flask] . identifier[abort] ( literal[int] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[Project] . identifier[path] ( identifier[project] , identifier[user] )+ literal[string] ):
identifier[os] . identifier[unlink] ( identifier[Project] . identifier[path] ( identifier[project] , identifier[user] )+ literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[Project] . identifier[path] ( identifier[project] , identifier[user] )+ literal[string] ):
identifier[os] . identifier[unlink] ( identifier[Project] . identifier[path] ( identifier[project] , identifier[user] )+ literal[string] ) | def reset(project, user):
"""Reset system, delete all output files and prepare for a new run"""
d = Project.path(project, user) + 'output'
if os.path.isdir(d):
shutil.rmtree(d)
os.makedirs(d) # depends on [control=['if'], data=[]]
else:
raise flask.abort(404)
if os.path.exists(Project.path(project, user) + '.done'):
os.unlink(Project.path(project, user) + '.done') # depends on [control=['if'], data=[]]
if os.path.exists(Project.path(project, user) + '.status'):
os.unlink(Project.path(project, user) + '.status') # depends on [control=['if'], data=[]] |
def _escape_lucene_query(query, field=None):
"""
Escapes special characters in Solr queries.
Note that this omits * - this is intentionally permitted in user queries.
The list of special characters is located at http://lucene.apache.org/core/4_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters
"""
replacement = r"\\\1"
return re.sub(r'([\'" +\-!\(\)\{\}\[\]^"~?:\\/]|&&|\|\|)', replacement, query) | def function[_escape_lucene_query, parameter[query, field]]:
constant[
Escapes special characters in Solr queries.
Note that this omits * - this is intentionally permitted in user queries.
The list of special characters is located at http://lucene.apache.org/core/4_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters
]
variable[replacement] assign[=] constant[\\\1]
return[call[name[re].sub, parameter[constant[([\'" +\-!\(\)\{\}\[\]^"~?:\\/]|&&|\|\|)], name[replacement], name[query]]]] | keyword[def] identifier[_escape_lucene_query] ( identifier[query] , identifier[field] = keyword[None] ):
literal[string]
identifier[replacement] = literal[string]
keyword[return] identifier[re] . identifier[sub] ( literal[string] , identifier[replacement] , identifier[query] ) | def _escape_lucene_query(query, field=None):
"""
Escapes special characters in Solr queries.
Note that this omits * - this is intentionally permitted in user queries.
The list of special characters is located at http://lucene.apache.org/core/4_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters
"""
replacement = '\\\\\\1'
return re.sub('([\\\'" +\\-!\\(\\)\\{\\}\\[\\]^"~?:\\\\/]|&&|\\|\\|)', replacement, query) |
def mergeSplitsOnInterfaces(root: LNode):
    """
    Collect all split/concatenation nodes, group them by the root interface
    port they touch, and merge each group into a single SPLIT/CONCAT node.
    """
    # Recurse depth-first so child scopes are merged before this one.
    for ch in root.children:
        if ch.children:
            mergeSplitsOnInterfaces(ch)
    ctx = MergeSplitsOnInterfacesCtx()
    for ch in root.children:
        srcPorts = None
        try:
            if ch.name == "CONCAT":
                # A simple CONCAT has exactly one east (output) port with
                # exactly one outgoing edge; follow it to its destinations.
                p = single(ch.east, lambda x: True)
                e = single(p.outgoingEdges, lambda x: True)
                srcPorts = e.dsts
            elif ch.name == "SLICE":
                # A simple SLICE has exactly one west (input) port with
                # exactly one incoming edge; follow it back to its sources.
                p = single(ch.west, lambda x: True)
                e = single(p.incomingEdges, lambda x: True)
                srcPorts = e.srcs
        except (DuplicitValueExc, NoValueExc):
            # Node does not match the simple single-port/single-edge shape;
            # leave it alone.
            continue
        if srcPorts is not None:
            for srcPort in srcPorts:
                if isinstance(srcPort.parent, LPort):
                    # only for non primitive ports
                    rootPort = getRootIntfPort(srcPort)
                    ctx.register(rootPort, ch, e)
    # join them if it is possible
    for srcPort, splitsAndConcats in ctx.iterPortSplits():
        if len(splitsAndConcats) <= 1:
            # Only one split/concat on this interface: nothing to merge.
            continue
        name = "SPLIT" if srcPort.direction == PortType.OUTPUT else "CONCAT"
        newSplitNode = root.addNode(name)
        # Mirror the interface port itself on the new merged node.
        copyPort(srcPort, newSplitNode, True, "")
        # Use the first grouped node's port count as the fan-out template.
        n = splitsAndConcats[0][0]
        for i in range(max(len(n.west),
                           len(n.east))):
            copyPort(
                srcPort, newSplitNode,
                False, "[%d]" % i)
        reconnectPorts(root, srcPort, splitsAndConcats,
                       newSplitNode)
newSplitNode) | def function[mergeSplitsOnInterfaces, parameter[root]]:
constant[
collect all split/concatenation nodes and group them by target interface
]
for taget[name[ch]] in starred[name[root].children] begin[:]
if name[ch].children begin[:]
call[name[mergeSplitsOnInterfaces], parameter[name[ch]]]
variable[ctx] assign[=] call[name[MergeSplitsOnInterfacesCtx], parameter[]]
for taget[name[ch]] in starred[name[root].children] begin[:]
variable[srcPorts] assign[=] constant[None]
<ast.Try object at 0x7da1b242a050>
if compare[name[srcPorts] is_not constant[None]] begin[:]
for taget[name[srcPort]] in starred[name[srcPorts]] begin[:]
if call[name[isinstance], parameter[name[srcPort].parent, name[LPort]]] begin[:]
variable[rootPort] assign[=] call[name[getRootIntfPort], parameter[name[srcPort]]]
call[name[ctx].register, parameter[name[rootPort], name[ch], name[e]]]
for taget[tuple[[<ast.Name object at 0x7da1b24e3d90>, <ast.Name object at 0x7da1b24e1630>]]] in starred[call[name[ctx].iterPortSplits, parameter[]]] begin[:]
if compare[call[name[len], parameter[name[splitsAndConcats]]] less_or_equal[<=] constant[1]] begin[:]
continue
variable[name] assign[=] <ast.IfExp object at 0x7da1b24e2710>
variable[newSplitNode] assign[=] call[name[root].addNode, parameter[name[name]]]
call[name[copyPort], parameter[name[srcPort], name[newSplitNode], constant[True], constant[]]]
variable[n] assign[=] call[call[name[splitsAndConcats]][constant[0]]][constant[0]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[max], parameter[call[name[len], parameter[name[n].west]], call[name[len], parameter[name[n].east]]]]]]] begin[:]
call[name[copyPort], parameter[name[srcPort], name[newSplitNode], constant[False], binary_operation[constant[[%d]] <ast.Mod object at 0x7da2590d6920> name[i]]]]
call[name[reconnectPorts], parameter[name[root], name[srcPort], name[splitsAndConcats], name[newSplitNode]]] | keyword[def] identifier[mergeSplitsOnInterfaces] ( identifier[root] : identifier[LNode] ):
literal[string]
keyword[for] identifier[ch] keyword[in] identifier[root] . identifier[children] :
keyword[if] identifier[ch] . identifier[children] :
identifier[mergeSplitsOnInterfaces] ( identifier[ch] )
identifier[ctx] = identifier[MergeSplitsOnInterfacesCtx] ()
keyword[for] identifier[ch] keyword[in] identifier[root] . identifier[children] :
identifier[srcPorts] = keyword[None]
keyword[try] :
keyword[if] identifier[ch] . identifier[name] == literal[string] :
identifier[p] = identifier[single] ( identifier[ch] . identifier[east] , keyword[lambda] identifier[x] : keyword[True] )
identifier[e] = identifier[single] ( identifier[p] . identifier[outgoingEdges] , keyword[lambda] identifier[x] : keyword[True] )
identifier[srcPorts] = identifier[e] . identifier[dsts]
keyword[elif] identifier[ch] . identifier[name] == literal[string] :
identifier[p] = identifier[single] ( identifier[ch] . identifier[west] , keyword[lambda] identifier[x] : keyword[True] )
identifier[e] = identifier[single] ( identifier[p] . identifier[incomingEdges] , keyword[lambda] identifier[x] : keyword[True] )
identifier[srcPorts] = identifier[e] . identifier[srcs]
keyword[except] ( identifier[DuplicitValueExc] , identifier[NoValueExc] ):
keyword[continue]
keyword[if] identifier[srcPorts] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[srcPort] keyword[in] identifier[srcPorts] :
keyword[if] identifier[isinstance] ( identifier[srcPort] . identifier[parent] , identifier[LPort] ):
identifier[rootPort] = identifier[getRootIntfPort] ( identifier[srcPort] )
identifier[ctx] . identifier[register] ( identifier[rootPort] , identifier[ch] , identifier[e] )
keyword[for] identifier[srcPort] , identifier[splitsAndConcats] keyword[in] identifier[ctx] . identifier[iterPortSplits] ():
keyword[if] identifier[len] ( identifier[splitsAndConcats] )<= literal[int] :
keyword[continue]
identifier[name] = literal[string] keyword[if] identifier[srcPort] . identifier[direction] == identifier[PortType] . identifier[OUTPUT] keyword[else] literal[string]
identifier[newSplitNode] = identifier[root] . identifier[addNode] ( identifier[name] )
identifier[copyPort] ( identifier[srcPort] , identifier[newSplitNode] , keyword[True] , literal[string] )
identifier[n] = identifier[splitsAndConcats] [ literal[int] ][ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[max] ( identifier[len] ( identifier[n] . identifier[west] ),
identifier[len] ( identifier[n] . identifier[east] ))):
identifier[copyPort] (
identifier[srcPort] , identifier[newSplitNode] ,
keyword[False] , literal[string] % identifier[i] )
identifier[reconnectPorts] ( identifier[root] , identifier[srcPort] , identifier[splitsAndConcats] ,
identifier[newSplitNode] ) | def mergeSplitsOnInterfaces(root: LNode):
"""
collect all split/concatenation nodes and group them by target interface
"""
for ch in root.children:
if ch.children:
mergeSplitsOnInterfaces(ch) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ch']]
ctx = MergeSplitsOnInterfacesCtx()
for ch in root.children:
srcPorts = None
try:
if ch.name == 'CONCAT':
p = single(ch.east, lambda x: True)
e = single(p.outgoingEdges, lambda x: True)
srcPorts = e.dsts # depends on [control=['if'], data=[]]
elif ch.name == 'SLICE':
p = single(ch.west, lambda x: True)
e = single(p.incomingEdges, lambda x: True)
srcPorts = e.srcs # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (DuplicitValueExc, NoValueExc):
continue # depends on [control=['except'], data=[]]
if srcPorts is not None:
for srcPort in srcPorts:
if isinstance(srcPort.parent, LPort):
# only for non primitive ports
rootPort = getRootIntfPort(srcPort)
ctx.register(rootPort, ch, e) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['srcPort']] # depends on [control=['if'], data=['srcPorts']] # depends on [control=['for'], data=['ch']]
# join them if it is possible
for (srcPort, splitsAndConcats) in ctx.iterPortSplits():
if len(splitsAndConcats) <= 1:
continue # depends on [control=['if'], data=[]]
name = 'SPLIT' if srcPort.direction == PortType.OUTPUT else 'CONCAT'
newSplitNode = root.addNode(name)
copyPort(srcPort, newSplitNode, True, '')
n = splitsAndConcats[0][0]
for i in range(max(len(n.west), len(n.east))):
copyPort(srcPort, newSplitNode, False, '[%d]' % i) # depends on [control=['for'], data=['i']]
reconnectPorts(root, srcPort, splitsAndConcats, newSplitNode) # depends on [control=['for'], data=[]] |
def _call(self, x, out=None):
"""Implement ``self(x[, out])``."""
if out is None:
return self.operator(x) * self.vector
else:
self.operator(x, out=out)
out *= self.vector | def function[_call, parameter[self, x, out]]:
constant[Implement ``self(x[, out])``.]
if compare[name[out] is constant[None]] begin[:]
return[binary_operation[call[name[self].operator, parameter[name[x]]] * name[self].vector]] | keyword[def] identifier[_call] ( identifier[self] , identifier[x] , identifier[out] = keyword[None] ):
literal[string]
keyword[if] identifier[out] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[operator] ( identifier[x] )* identifier[self] . identifier[vector]
keyword[else] :
identifier[self] . identifier[operator] ( identifier[x] , identifier[out] = identifier[out] )
identifier[out] *= identifier[self] . identifier[vector] | def _call(self, x, out=None):
"""Implement ``self(x[, out])``."""
if out is None:
return self.operator(x) * self.vector # depends on [control=['if'], data=[]]
else:
self.operator(x, out=out)
out *= self.vector |
def dictstr(arg):
    """
    Parse a key=value string as a tuple (key, value) that can be provided as an argument to dict()

    The value is coerced: "true"/"false" (case-insensitive) become booleans,
    integer-looking strings become int, float-looking strings become float;
    anything else is returned as the original string.

    :param arg: string of the form "key=value"
    :return: (key, value) tuple
    :raises ValueError: if arg does not contain exactly one "="
    """
    key, value = arg.split("=")
    lowered = value.lower()
    if lowered == "true" or lowered == "false":
        # BUG FIX: bool(value) is True for ANY non-empty string, so
        # "false" used to parse as True. Compare against "true" instead.
        value = lowered == "true"
    elif INT_RE.match(value):
        value = int(value)
    elif FLOAT_RE.match(value):
        value = float(value)
    return (key, value)
constant[
Parse a key=value string as a tuple (key, value) that can be provided as an argument to dict()
]
<ast.Tuple object at 0x7da1b04ff640> assign[=] call[name[arg].split, parameter[constant[=]]]
if <ast.BoolOp object at 0x7da1b04fd6c0> begin[:]
variable[value] assign[=] call[name[bool], parameter[name[value]]]
return[tuple[[<ast.Name object at 0x7da1b05f8040>, <ast.Name object at 0x7da1b05f9c00>]]] | keyword[def] identifier[dictstr] ( identifier[arg] ):
literal[string]
identifier[key] , identifier[value] = identifier[arg] . identifier[split] ( literal[string] )
keyword[if] identifier[value] . identifier[lower] ()== literal[string] keyword[or] identifier[value] . identifier[lower] ()== literal[string] :
identifier[value] = identifier[bool] ( identifier[value] )
keyword[elif] identifier[INT_RE] . identifier[match] ( identifier[value] ):
identifier[value] = identifier[int] ( identifier[value] )
keyword[elif] identifier[FLOAT_RE] . identifier[match] ( identifier[value] ):
identifier[value] = identifier[float] ( identifier[value] )
keyword[return] ( identifier[key] , identifier[value] ) | def dictstr(arg):
"""
Parse a key=value string as a tuple (key, value) that can be provided as an argument to dict()
"""
(key, value) = arg.split('=')
if value.lower() == 'true' or value.lower() == 'false':
value = bool(value) # depends on [control=['if'], data=[]]
elif INT_RE.match(value):
value = int(value) # depends on [control=['if'], data=[]]
elif FLOAT_RE.match(value):
value = float(value) # depends on [control=['if'], data=[]]
return (key, value) |
def _unbytes(bytestr):
"""
Returns a bytestring from the human-friendly string returned by `_bytes`.
>>> _unbytes('123456')
'\x12\x34\x56'
"""
return ''.join(chr(int(bytestr[k:k + 2], 16))
for k in range(0, len(bytestr), 2)) | def function[_unbytes, parameter[bytestr]]:
constant[
Returns a bytestring from the human-friendly string returned by `_bytes`.
>>> _unbytes('123456')
'4V'
]
return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da20e956da0>]]] | keyword[def] identifier[_unbytes] ( identifier[bytestr] ):
literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[chr] ( identifier[int] ( identifier[bytestr] [ identifier[k] : identifier[k] + literal[int] ], literal[int] ))
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[bytestr] ), literal[int] )) | def _unbytes(bytestr):
"""
Returns a bytestring from the human-friendly string returned by `_bytes`.
>>> _unbytes('123456')
'\x124V'
"""
return ''.join((chr(int(bytestr[k:k + 2], 16)) for k in range(0, len(bytestr), 2))) |
def handle(job_id, log_level='info', log_file=None):
"""
Context manager adding and removing log handlers.
:param job_id:
ID of the current job
:param log_level:
one of debug, info, warn, error, critical
:param log_file:
log file path (if None, logs on stdout only)
"""
handlers = [LogDatabaseHandler(job_id)] # log on db always
if log_file is None:
# add a StreamHandler if not already there
if not any(h for h in logging.root.handlers
if isinstance(h, logging.StreamHandler)):
handlers.append(LogStreamHandler(job_id))
else:
handlers.append(LogFileHandler(job_id, log_file))
for handler in handlers:
logging.root.addHandler(handler)
init(job_id, LEVELS.get(log_level, logging.WARNING))
try:
yield
finally:
# sanity check to make sure that the logging on file is working
if (log_file and log_file != os.devnull and
os.path.getsize(log_file) == 0):
logging.root.warn('The log file %s is empty!?' % log_file)
for handler in handlers:
logging.root.removeHandler(handler) | def function[handle, parameter[job_id, log_level, log_file]]:
constant[
Context manager adding and removing log handlers.
:param job_id:
ID of the current job
:param log_level:
one of debug, info, warn, error, critical
:param log_file:
log file path (if None, logs on stdout only)
]
variable[handlers] assign[=] list[[<ast.Call object at 0x7da2054a6560>]]
if compare[name[log_file] is constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da2054a47f0> begin[:]
call[name[handlers].append, parameter[call[name[LogStreamHandler], parameter[name[job_id]]]]]
for taget[name[handler]] in starred[name[handlers]] begin[:]
call[name[logging].root.addHandler, parameter[name[handler]]]
call[name[init], parameter[name[job_id], call[name[LEVELS].get, parameter[name[log_level], name[logging].WARNING]]]]
<ast.Try object at 0x7da2054a7910> | keyword[def] identifier[handle] ( identifier[job_id] , identifier[log_level] = literal[string] , identifier[log_file] = keyword[None] ):
literal[string]
identifier[handlers] =[ identifier[LogDatabaseHandler] ( identifier[job_id] )]
keyword[if] identifier[log_file] keyword[is] keyword[None] :
keyword[if] keyword[not] identifier[any] ( identifier[h] keyword[for] identifier[h] keyword[in] identifier[logging] . identifier[root] . identifier[handlers]
keyword[if] identifier[isinstance] ( identifier[h] , identifier[logging] . identifier[StreamHandler] )):
identifier[handlers] . identifier[append] ( identifier[LogStreamHandler] ( identifier[job_id] ))
keyword[else] :
identifier[handlers] . identifier[append] ( identifier[LogFileHandler] ( identifier[job_id] , identifier[log_file] ))
keyword[for] identifier[handler] keyword[in] identifier[handlers] :
identifier[logging] . identifier[root] . identifier[addHandler] ( identifier[handler] )
identifier[init] ( identifier[job_id] , identifier[LEVELS] . identifier[get] ( identifier[log_level] , identifier[logging] . identifier[WARNING] ))
keyword[try] :
keyword[yield]
keyword[finally] :
keyword[if] ( identifier[log_file] keyword[and] identifier[log_file] != identifier[os] . identifier[devnull] keyword[and]
identifier[os] . identifier[path] . identifier[getsize] ( identifier[log_file] )== literal[int] ):
identifier[logging] . identifier[root] . identifier[warn] ( literal[string] % identifier[log_file] )
keyword[for] identifier[handler] keyword[in] identifier[handlers] :
identifier[logging] . identifier[root] . identifier[removeHandler] ( identifier[handler] ) | def handle(job_id, log_level='info', log_file=None):
"""
Context manager adding and removing log handlers.
:param job_id:
ID of the current job
:param log_level:
one of debug, info, warn, error, critical
:param log_file:
log file path (if None, logs on stdout only)
"""
handlers = [LogDatabaseHandler(job_id)] # log on db always
if log_file is None:
# add a StreamHandler if not already there
if not any((h for h in logging.root.handlers if isinstance(h, logging.StreamHandler))):
handlers.append(LogStreamHandler(job_id)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
handlers.append(LogFileHandler(job_id, log_file))
for handler in handlers:
logging.root.addHandler(handler) # depends on [control=['for'], data=['handler']]
init(job_id, LEVELS.get(log_level, logging.WARNING))
try:
yield # depends on [control=['try'], data=[]]
finally:
# sanity check to make sure that the logging on file is working
if log_file and log_file != os.devnull and (os.path.getsize(log_file) == 0):
logging.root.warn('The log file %s is empty!?' % log_file) # depends on [control=['if'], data=[]]
for handler in handlers:
logging.root.removeHandler(handler) # depends on [control=['for'], data=['handler']] |
def get_stp_mst_detail_output_cist_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
hello_time = ET.SubElement(cist, "hello-time")
hello_time.text = kwargs.pop('hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_stp_mst_detail_output_cist_hello_time, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_stp_mst_detail] assign[=] call[name[ET].Element, parameter[constant[get_stp_mst_detail]]]
variable[config] assign[=] name[get_stp_mst_detail]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_stp_mst_detail], constant[output]]]
variable[cist] assign[=] call[name[ET].SubElement, parameter[name[output], constant[cist]]]
variable[hello_time] assign[=] call[name[ET].SubElement, parameter[name[cist], constant[hello-time]]]
name[hello_time].text assign[=] call[name[kwargs].pop, parameter[constant[hello_time]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_stp_mst_detail_output_cist_hello_time] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_stp_mst_detail] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_stp_mst_detail]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_stp_mst_detail] , literal[string] )
identifier[cist] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[hello_time] = identifier[ET] . identifier[SubElement] ( identifier[cist] , literal[string] )
identifier[hello_time] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_stp_mst_detail_output_cist_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_stp_mst_detail = ET.Element('get_stp_mst_detail')
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, 'output')
cist = ET.SubElement(output, 'cist')
hello_time = ET.SubElement(cist, 'hello-time')
hello_time.text = kwargs.pop('hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def replace(self, identifier, new_instance):
"""
Create or update a model.
"""
try:
# Note that `self.update()` ultimately calls merge, which will not enforce
# a strict replacement; absent fields will default to the current values.
return self.update(identifier, new_instance)
except ModelNotFoundError:
return self.create(new_instance) | def function[replace, parameter[self, identifier, new_instance]]:
constant[
Create or update a model.
]
<ast.Try object at 0x7da1b0c0da20> | keyword[def] identifier[replace] ( identifier[self] , identifier[identifier] , identifier[new_instance] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[update] ( identifier[identifier] , identifier[new_instance] )
keyword[except] identifier[ModelNotFoundError] :
keyword[return] identifier[self] . identifier[create] ( identifier[new_instance] ) | def replace(self, identifier, new_instance):
"""
Create or update a model.
"""
try:
# Note that `self.update()` ultimately calls merge, which will not enforce
# a strict replacement; absent fields will default to the current values.
return self.update(identifier, new_instance) # depends on [control=['try'], data=[]]
except ModelNotFoundError:
return self.create(new_instance) # depends on [control=['except'], data=[]] |
def get_stats_of_rows(rows):
"""Calculcate number of true/false tasks and maximum achievable score."""
count_true = count_false = max_score = 0
for row in rows:
if not row.properties:
logging.info('Missing property for %s.', row.filename)
continue
if len(row.properties) > 1:
# multiple properties not yet supported
count_true = count_false = max_score = 0
break
expected_result = row.expected_results.get(row.properties[0].name)
if not expected_result:
continue
if expected_result.result is True:
count_true += 1
elif expected_result.result is False:
count_false += 1
for prop in row.properties:
max_score += prop.max_score(expected_result)
return max_score, count_true, count_false | def function[get_stats_of_rows, parameter[rows]]:
constant[Calculcate number of true/false tasks and maximum achievable score.]
variable[count_true] assign[=] constant[0]
for taget[name[row]] in starred[name[rows]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e7af0> begin[:]
call[name[logging].info, parameter[constant[Missing property for %s.], name[row].filename]]
continue
if compare[call[name[len], parameter[name[row].properties]] greater[>] constant[1]] begin[:]
variable[count_true] assign[=] constant[0]
break
variable[expected_result] assign[=] call[name[row].expected_results.get, parameter[call[name[row].properties][constant[0]].name]]
if <ast.UnaryOp object at 0x7da20c6e5810> begin[:]
continue
if compare[name[expected_result].result is constant[True]] begin[:]
<ast.AugAssign object at 0x7da20c6e78b0>
for taget[name[prop]] in starred[name[row].properties] begin[:]
<ast.AugAssign object at 0x7da20c6e6d40>
return[tuple[[<ast.Name object at 0x7da18bccbfa0>, <ast.Name object at 0x7da18bcc9c60>, <ast.Name object at 0x7da18bcca200>]]] | keyword[def] identifier[get_stats_of_rows] ( identifier[rows] ):
literal[string]
identifier[count_true] = identifier[count_false] = identifier[max_score] = literal[int]
keyword[for] identifier[row] keyword[in] identifier[rows] :
keyword[if] keyword[not] identifier[row] . identifier[properties] :
identifier[logging] . identifier[info] ( literal[string] , identifier[row] . identifier[filename] )
keyword[continue]
keyword[if] identifier[len] ( identifier[row] . identifier[properties] )> literal[int] :
identifier[count_true] = identifier[count_false] = identifier[max_score] = literal[int]
keyword[break]
identifier[expected_result] = identifier[row] . identifier[expected_results] . identifier[get] ( identifier[row] . identifier[properties] [ literal[int] ]. identifier[name] )
keyword[if] keyword[not] identifier[expected_result] :
keyword[continue]
keyword[if] identifier[expected_result] . identifier[result] keyword[is] keyword[True] :
identifier[count_true] += literal[int]
keyword[elif] identifier[expected_result] . identifier[result] keyword[is] keyword[False] :
identifier[count_false] += literal[int]
keyword[for] identifier[prop] keyword[in] identifier[row] . identifier[properties] :
identifier[max_score] += identifier[prop] . identifier[max_score] ( identifier[expected_result] )
keyword[return] identifier[max_score] , identifier[count_true] , identifier[count_false] | def get_stats_of_rows(rows):
"""Calculcate number of true/false tasks and maximum achievable score."""
count_true = count_false = max_score = 0
for row in rows:
if not row.properties:
logging.info('Missing property for %s.', row.filename)
continue # depends on [control=['if'], data=[]]
if len(row.properties) > 1:
# multiple properties not yet supported
count_true = count_false = max_score = 0
break # depends on [control=['if'], data=[]]
expected_result = row.expected_results.get(row.properties[0].name)
if not expected_result:
continue # depends on [control=['if'], data=[]]
if expected_result.result is True:
count_true += 1 # depends on [control=['if'], data=[]]
elif expected_result.result is False:
count_false += 1 # depends on [control=['if'], data=[]]
for prop in row.properties:
max_score += prop.max_score(expected_result) # depends on [control=['for'], data=['prop']] # depends on [control=['for'], data=['row']]
return (max_score, count_true, count_false) |
def createWidgetItem(self, item_type, elem, getter, *getter_args):
""" Create a specific type of widget item. """
item = self.factory.createQObject(item_type, "item", (), False)
props = self.wprops
# Note that not all types of widget items support the full set of
# properties.
text = props.getProperty(elem, 'text')
status_tip = props.getProperty(elem, 'statusTip')
tool_tip = props.getProperty(elem, 'toolTip')
whats_this = props.getProperty(elem, 'whatsThis')
if self.any_i18n(text, status_tip, tool_tip, whats_this):
self.factory.invoke("item", getter, getter_args)
if text:
item.setText(text)
if status_tip:
item.setStatusTip(status_tip)
if tool_tip:
item.setToolTip(tool_tip)
if whats_this:
item.setWhatsThis(whats_this)
text_alignment = props.getProperty(elem, 'textAlignment')
if text_alignment:
item.setTextAlignment(text_alignment)
font = props.getProperty(elem, 'font')
if font:
item.setFont(font)
icon = props.getProperty(elem, 'icon')
if icon:
item.setIcon(icon)
background = props.getProperty(elem, 'background')
if background:
item.setBackground(background)
foreground = props.getProperty(elem, 'foreground')
if foreground:
item.setForeground(foreground)
flags = props.getProperty(elem, 'flags')
if flags:
item.setFlags(flags)
check_state = props.getProperty(elem, 'checkState')
if check_state:
item.setCheckState(check_state)
return item | def function[createWidgetItem, parameter[self, item_type, elem, getter]]:
constant[ Create a specific type of widget item. ]
variable[item] assign[=] call[name[self].factory.createQObject, parameter[name[item_type], constant[item], tuple[[]], constant[False]]]
variable[props] assign[=] name[self].wprops
variable[text] assign[=] call[name[props].getProperty, parameter[name[elem], constant[text]]]
variable[status_tip] assign[=] call[name[props].getProperty, parameter[name[elem], constant[statusTip]]]
variable[tool_tip] assign[=] call[name[props].getProperty, parameter[name[elem], constant[toolTip]]]
variable[whats_this] assign[=] call[name[props].getProperty, parameter[name[elem], constant[whatsThis]]]
if call[name[self].any_i18n, parameter[name[text], name[status_tip], name[tool_tip], name[whats_this]]] begin[:]
call[name[self].factory.invoke, parameter[constant[item], name[getter], name[getter_args]]]
if name[text] begin[:]
call[name[item].setText, parameter[name[text]]]
if name[status_tip] begin[:]
call[name[item].setStatusTip, parameter[name[status_tip]]]
if name[tool_tip] begin[:]
call[name[item].setToolTip, parameter[name[tool_tip]]]
if name[whats_this] begin[:]
call[name[item].setWhatsThis, parameter[name[whats_this]]]
variable[text_alignment] assign[=] call[name[props].getProperty, parameter[name[elem], constant[textAlignment]]]
if name[text_alignment] begin[:]
call[name[item].setTextAlignment, parameter[name[text_alignment]]]
variable[font] assign[=] call[name[props].getProperty, parameter[name[elem], constant[font]]]
if name[font] begin[:]
call[name[item].setFont, parameter[name[font]]]
variable[icon] assign[=] call[name[props].getProperty, parameter[name[elem], constant[icon]]]
if name[icon] begin[:]
call[name[item].setIcon, parameter[name[icon]]]
variable[background] assign[=] call[name[props].getProperty, parameter[name[elem], constant[background]]]
if name[background] begin[:]
call[name[item].setBackground, parameter[name[background]]]
variable[foreground] assign[=] call[name[props].getProperty, parameter[name[elem], constant[foreground]]]
if name[foreground] begin[:]
call[name[item].setForeground, parameter[name[foreground]]]
variable[flags] assign[=] call[name[props].getProperty, parameter[name[elem], constant[flags]]]
if name[flags] begin[:]
call[name[item].setFlags, parameter[name[flags]]]
variable[check_state] assign[=] call[name[props].getProperty, parameter[name[elem], constant[checkState]]]
if name[check_state] begin[:]
call[name[item].setCheckState, parameter[name[check_state]]]
return[name[item]] | keyword[def] identifier[createWidgetItem] ( identifier[self] , identifier[item_type] , identifier[elem] , identifier[getter] ,* identifier[getter_args] ):
literal[string]
identifier[item] = identifier[self] . identifier[factory] . identifier[createQObject] ( identifier[item_type] , literal[string] ,(), keyword[False] )
identifier[props] = identifier[self] . identifier[wprops]
identifier[text] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
identifier[status_tip] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
identifier[tool_tip] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
identifier[whats_this] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
keyword[if] identifier[self] . identifier[any_i18n] ( identifier[text] , identifier[status_tip] , identifier[tool_tip] , identifier[whats_this] ):
identifier[self] . identifier[factory] . identifier[invoke] ( literal[string] , identifier[getter] , identifier[getter_args] )
keyword[if] identifier[text] :
identifier[item] . identifier[setText] ( identifier[text] )
keyword[if] identifier[status_tip] :
identifier[item] . identifier[setStatusTip] ( identifier[status_tip] )
keyword[if] identifier[tool_tip] :
identifier[item] . identifier[setToolTip] ( identifier[tool_tip] )
keyword[if] identifier[whats_this] :
identifier[item] . identifier[setWhatsThis] ( identifier[whats_this] )
identifier[text_alignment] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
keyword[if] identifier[text_alignment] :
identifier[item] . identifier[setTextAlignment] ( identifier[text_alignment] )
identifier[font] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
keyword[if] identifier[font] :
identifier[item] . identifier[setFont] ( identifier[font] )
identifier[icon] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
keyword[if] identifier[icon] :
identifier[item] . identifier[setIcon] ( identifier[icon] )
identifier[background] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
keyword[if] identifier[background] :
identifier[item] . identifier[setBackground] ( identifier[background] )
identifier[foreground] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
keyword[if] identifier[foreground] :
identifier[item] . identifier[setForeground] ( identifier[foreground] )
identifier[flags] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
keyword[if] identifier[flags] :
identifier[item] . identifier[setFlags] ( identifier[flags] )
identifier[check_state] = identifier[props] . identifier[getProperty] ( identifier[elem] , literal[string] )
keyword[if] identifier[check_state] :
identifier[item] . identifier[setCheckState] ( identifier[check_state] )
keyword[return] identifier[item] | def createWidgetItem(self, item_type, elem, getter, *getter_args):
""" Create a specific type of widget item. """
item = self.factory.createQObject(item_type, 'item', (), False)
props = self.wprops
# Note that not all types of widget items support the full set of
# properties.
text = props.getProperty(elem, 'text')
status_tip = props.getProperty(elem, 'statusTip')
tool_tip = props.getProperty(elem, 'toolTip')
whats_this = props.getProperty(elem, 'whatsThis')
if self.any_i18n(text, status_tip, tool_tip, whats_this):
self.factory.invoke('item', getter, getter_args) # depends on [control=['if'], data=[]]
if text:
item.setText(text) # depends on [control=['if'], data=[]]
if status_tip:
item.setStatusTip(status_tip) # depends on [control=['if'], data=[]]
if tool_tip:
item.setToolTip(tool_tip) # depends on [control=['if'], data=[]]
if whats_this:
item.setWhatsThis(whats_this) # depends on [control=['if'], data=[]]
text_alignment = props.getProperty(elem, 'textAlignment')
if text_alignment:
item.setTextAlignment(text_alignment) # depends on [control=['if'], data=[]]
font = props.getProperty(elem, 'font')
if font:
item.setFont(font) # depends on [control=['if'], data=[]]
icon = props.getProperty(elem, 'icon')
if icon:
item.setIcon(icon) # depends on [control=['if'], data=[]]
background = props.getProperty(elem, 'background')
if background:
item.setBackground(background) # depends on [control=['if'], data=[]]
foreground = props.getProperty(elem, 'foreground')
if foreground:
item.setForeground(foreground) # depends on [control=['if'], data=[]]
flags = props.getProperty(elem, 'flags')
if flags:
item.setFlags(flags) # depends on [control=['if'], data=[]]
check_state = props.getProperty(elem, 'checkState')
if check_state:
item.setCheckState(check_state) # depends on [control=['if'], data=[]]
return item |
def message(self) -> str:
""" The human readable message of the current stage """
if self.is_error:
assert self._error
return self._error.human
else:
return self._stage.value.human | def function[message, parameter[self]]:
constant[ The human readable message of the current stage ]
if name[self].is_error begin[:]
assert[name[self]._error]
return[name[self]._error.human] | keyword[def] identifier[message] ( identifier[self] )-> identifier[str] :
literal[string]
keyword[if] identifier[self] . identifier[is_error] :
keyword[assert] identifier[self] . identifier[_error]
keyword[return] identifier[self] . identifier[_error] . identifier[human]
keyword[else] :
keyword[return] identifier[self] . identifier[_stage] . identifier[value] . identifier[human] | def message(self) -> str:
""" The human readable message of the current stage """
if self.is_error:
assert self._error
return self._error.human # depends on [control=['if'], data=[]]
else:
return self._stage.value.human |
def get_iuse(cp):
'''
.. versionadded:: 2015.8.0
Gets the current IUSE flags from the tree.
@type: cpv: string
@param cpv: cat/pkg
@rtype list
@returns [] or the list of IUSE flags
'''
cpv = _get_cpv(cp)
try:
# aux_get might return dupes, so run them through set() to remove them
dirty_flags = _porttree().dbapi.aux_get(cpv, ["IUSE"])[0].split()
return list(set(dirty_flags))
except Exception as e:
return [] | def function[get_iuse, parameter[cp]]:
constant[
.. versionadded:: 2015.8.0
Gets the current IUSE flags from the tree.
@type: cpv: string
@param cpv: cat/pkg
@rtype list
@returns [] or the list of IUSE flags
]
variable[cpv] assign[=] call[name[_get_cpv], parameter[name[cp]]]
<ast.Try object at 0x7da1b20b8670> | keyword[def] identifier[get_iuse] ( identifier[cp] ):
literal[string]
identifier[cpv] = identifier[_get_cpv] ( identifier[cp] )
keyword[try] :
identifier[dirty_flags] = identifier[_porttree] (). identifier[dbapi] . identifier[aux_get] ( identifier[cpv] ,[ literal[string] ])[ literal[int] ]. identifier[split] ()
keyword[return] identifier[list] ( identifier[set] ( identifier[dirty_flags] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] [] | def get_iuse(cp):
"""
.. versionadded:: 2015.8.0
Gets the current IUSE flags from the tree.
@type: cpv: string
@param cpv: cat/pkg
@rtype list
@returns [] or the list of IUSE flags
"""
cpv = _get_cpv(cp)
try:
# aux_get might return dupes, so run them through set() to remove them
dirty_flags = _porttree().dbapi.aux_get(cpv, ['IUSE'])[0].split()
return list(set(dirty_flags)) # depends on [control=['try'], data=[]]
except Exception as e:
return [] # depends on [control=['except'], data=[]] |
def get_queue_sizes(self, queue):
"""
Get the queue's number of tasks in each state.
Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE
states. Does not include size of error queue.
"""
states = [QUEUED, SCHEDULED, ACTIVE]
pipeline = self.connection.pipeline()
for state in states:
pipeline.zcard(self._key(state, queue))
results = pipeline.execute()
return dict(zip(states, results)) | def function[get_queue_sizes, parameter[self, queue]]:
constant[
Get the queue's number of tasks in each state.
Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE
states. Does not include size of error queue.
]
variable[states] assign[=] list[[<ast.Name object at 0x7da1b155e560>, <ast.Name object at 0x7da1b155ddb0>, <ast.Name object at 0x7da1b155e590>]]
variable[pipeline] assign[=] call[name[self].connection.pipeline, parameter[]]
for taget[name[state]] in starred[name[states]] begin[:]
call[name[pipeline].zcard, parameter[call[name[self]._key, parameter[name[state], name[queue]]]]]
variable[results] assign[=] call[name[pipeline].execute, parameter[]]
return[call[name[dict], parameter[call[name[zip], parameter[name[states], name[results]]]]]] | keyword[def] identifier[get_queue_sizes] ( identifier[self] , identifier[queue] ):
literal[string]
identifier[states] =[ identifier[QUEUED] , identifier[SCHEDULED] , identifier[ACTIVE] ]
identifier[pipeline] = identifier[self] . identifier[connection] . identifier[pipeline] ()
keyword[for] identifier[state] keyword[in] identifier[states] :
identifier[pipeline] . identifier[zcard] ( identifier[self] . identifier[_key] ( identifier[state] , identifier[queue] ))
identifier[results] = identifier[pipeline] . identifier[execute] ()
keyword[return] identifier[dict] ( identifier[zip] ( identifier[states] , identifier[results] )) | def get_queue_sizes(self, queue):
"""
Get the queue's number of tasks in each state.
Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE
states. Does not include size of error queue.
"""
states = [QUEUED, SCHEDULED, ACTIVE]
pipeline = self.connection.pipeline()
for state in states:
pipeline.zcard(self._key(state, queue)) # depends on [control=['for'], data=['state']]
results = pipeline.execute()
return dict(zip(states, results)) |
def _extract_dependencies_by_root(cls, result):
"""
Only extracts the transitive dependencies for the given coursier resolve.
Note the "dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
:return: a simplified view with the top artifact as the roots.
"""
flat_result = defaultdict(list)
for artifact in result['dependencies']:
flat_result[artifact['coord']].extend(artifact['dependencies'])
return flat_result | def function[_extract_dependencies_by_root, parameter[cls, result]]:
constant[
Only extracts the transitive dependencies for the given coursier resolve.
Note the "dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
:return: a simplified view with the top artifact as the roots.
]
variable[flat_result] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[artifact]] in starred[call[name[result]][constant[dependencies]]] begin[:]
call[call[name[flat_result]][call[name[artifact]][constant[coord]]].extend, parameter[call[name[artifact]][constant[dependencies]]]]
return[name[flat_result]] | keyword[def] identifier[_extract_dependencies_by_root] ( identifier[cls] , identifier[result] ):
literal[string]
identifier[flat_result] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[artifact] keyword[in] identifier[result] [ literal[string] ]:
identifier[flat_result] [ identifier[artifact] [ literal[string] ]]. identifier[extend] ( identifier[artifact] [ literal[string] ])
keyword[return] identifier[flat_result] | def _extract_dependencies_by_root(cls, result):
"""
Only extracts the transitive dependencies for the given coursier resolve.
Note the "dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
:return: a simplified view with the top artifact as the roots.
"""
flat_result = defaultdict(list)
for artifact in result['dependencies']:
flat_result[artifact['coord']].extend(artifact['dependencies']) # depends on [control=['for'], data=['artifact']]
return flat_result |
def read_document(fnm):
"""Read a document that is stored in a text file as JSON.
Parameters
----------
fnm: str
The path of the document.
Returns
-------
Text
"""
with codecs.open(fnm, 'rb', 'ascii') as f:
return Text(json.loads(f.read())) | def function[read_document, parameter[fnm]]:
constant[Read a document that is stored in a text file as JSON.
Parameters
----------
fnm: str
The path of the document.
Returns
-------
Text
]
with call[name[codecs].open, parameter[name[fnm], constant[rb], constant[ascii]]] begin[:]
return[call[name[Text], parameter[call[name[json].loads, parameter[call[name[f].read, parameter[]]]]]]] | keyword[def] identifier[read_document] ( identifier[fnm] ):
literal[string]
keyword[with] identifier[codecs] . identifier[open] ( identifier[fnm] , literal[string] , literal[string] ) keyword[as] identifier[f] :
keyword[return] identifier[Text] ( identifier[json] . identifier[loads] ( identifier[f] . identifier[read] ())) | def read_document(fnm):
"""Read a document that is stored in a text file as JSON.
Parameters
----------
fnm: str
The path of the document.
Returns
-------
Text
"""
with codecs.open(fnm, 'rb', 'ascii') as f:
return Text(json.loads(f.read())) # depends on [control=['with'], data=['f']] |
def build_wheel(wheel_directory, config_settings=None, metadata_directory=None):
"""Builds a wheel, places it in wheel_directory"""
poetry = Poetry.create(".")
return unicode(
WheelBuilder.make_in(
poetry, SystemEnv(Path(sys.prefix)), NullIO(), Path(wheel_directory)
)
) | def function[build_wheel, parameter[wheel_directory, config_settings, metadata_directory]]:
constant[Builds a wheel, places it in wheel_directory]
variable[poetry] assign[=] call[name[Poetry].create, parameter[constant[.]]]
return[call[name[unicode], parameter[call[name[WheelBuilder].make_in, parameter[name[poetry], call[name[SystemEnv], parameter[call[name[Path], parameter[name[sys].prefix]]]], call[name[NullIO], parameter[]], call[name[Path], parameter[name[wheel_directory]]]]]]]] | keyword[def] identifier[build_wheel] ( identifier[wheel_directory] , identifier[config_settings] = keyword[None] , identifier[metadata_directory] = keyword[None] ):
literal[string]
identifier[poetry] = identifier[Poetry] . identifier[create] ( literal[string] )
keyword[return] identifier[unicode] (
identifier[WheelBuilder] . identifier[make_in] (
identifier[poetry] , identifier[SystemEnv] ( identifier[Path] ( identifier[sys] . identifier[prefix] )), identifier[NullIO] (), identifier[Path] ( identifier[wheel_directory] )
)
) | def build_wheel(wheel_directory, config_settings=None, metadata_directory=None):
"""Builds a wheel, places it in wheel_directory"""
poetry = Poetry.create('.')
return unicode(WheelBuilder.make_in(poetry, SystemEnv(Path(sys.prefix)), NullIO(), Path(wheel_directory))) |
def next_counter(start=0, step=1):
r"""
Args:
start (int): (default = 0)
step (int): (default = 1)
Returns:
func: next_
CommandLine:
python -m utool.util_iter --test-next_counter
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> start = 1
>>> step = 1
>>> next_ = next_counter(start, step)
>>> result = str([next_(), next_(), next_()])
>>> print(result)
[1, 2, 3]
"""
count_gen = it.count(start, step)
next_ = functools.partial(six.next, count_gen)
return next_ | def function[next_counter, parameter[start, step]]:
constant[
Args:
start (int): (default = 0)
step (int): (default = 1)
Returns:
func: next_
CommandLine:
python -m utool.util_iter --test-next_counter
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> start = 1
>>> step = 1
>>> next_ = next_counter(start, step)
>>> result = str([next_(), next_(), next_()])
>>> print(result)
[1, 2, 3]
]
variable[count_gen] assign[=] call[name[it].count, parameter[name[start], name[step]]]
variable[next_] assign[=] call[name[functools].partial, parameter[name[six].next, name[count_gen]]]
return[name[next_]] | keyword[def] identifier[next_counter] ( identifier[start] = literal[int] , identifier[step] = literal[int] ):
literal[string]
identifier[count_gen] = identifier[it] . identifier[count] ( identifier[start] , identifier[step] )
identifier[next_] = identifier[functools] . identifier[partial] ( identifier[six] . identifier[next] , identifier[count_gen] )
keyword[return] identifier[next_] | def next_counter(start=0, step=1):
"""
Args:
start (int): (default = 0)
step (int): (default = 1)
Returns:
func: next_
CommandLine:
python -m utool.util_iter --test-next_counter
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> start = 1
>>> step = 1
>>> next_ = next_counter(start, step)
>>> result = str([next_(), next_(), next_()])
>>> print(result)
[1, 2, 3]
"""
count_gen = it.count(start, step)
next_ = functools.partial(six.next, count_gen)
return next_ |
def fig4(args):
"""
%prog fig4 layout data
Napus Figure 4A displays an example deleted region for quartet chromosomes,
showing read alignments from high GL and low GL lines.
"""
p = OptionParser(fig4.__doc__)
p.add_option("--gauge_step", default=200000, type="int",
help="Step size for the base scale")
opts, args, iopts = p.set_image_options(args, figsize="9x7")
if len(args) != 2:
sys.exit(not p.print_help())
layout, datadir = args
layout = F4ALayout(layout, datadir=datadir)
gs = opts.gauge_step
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
block, napusbed, slayout = "r28.txt", "all.bed", "r28.layout"
s = Synteny(fig, root, block, napusbed, slayout, chr_label=False)
synteny_exts = [(x.xstart, x.xend) for x in s.rr]
h = .1
order = "bzh,yudal".split(",")
labels = (r"\textit{B. napus} A$\mathsf{_n}$2",
r"\textit{B. rapa} A$\mathsf{_r}$2",
r"\textit{B. oleracea} C$\mathsf{_o}$2",
r"\textit{B. napus} C$\mathsf{_n}$2")
for t in layout:
xstart, xend = synteny_exts[2 * t.i]
canvas = [xstart, t.y, xend - xstart, h]
root.text(xstart - h, t.y + h / 2, labels[t.i], ha="center", va="center")
ch, ab = t.box_region.split(":")
a, b = ab.split("-")
vlines = [int(x) for x in (a, b)]
Coverage(fig, root, canvas, t.seqid, (t.start, t.end), datadir,
order=order, gauge="top", plot_chr_label=False,
gauge_step=gs, palette="gray",
cap=40, hlsuffix="regions.forhaibao",
vlines=vlines)
# Highlight GSL biosynthesis genes
a, b = (3, "Bra029311"), (5, "Bo2g161590")
for gid in (a, b):
start, end = s.gg[gid]
xstart, ystart = start
xend, yend = end
x = (xstart + xend) / 2
arrow = FancyArrowPatch(posA=(x, ystart - .04),
posB=(x, ystart - .005),
arrowstyle="fancy,head_width=6,head_length=8",
lw=3, fc='k', ec='k', zorder=20)
root.add_patch(arrow)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = "napus-fig4." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | def function[fig4, parameter[args]]:
constant[
%prog fig4 layout data
Napus Figure 4A displays an example deleted region for quartet chromosomes,
showing read alignments from high GL and low GL lines.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[fig4].__doc__]]
call[name[p].add_option, parameter[constant[--gauge_step]]]
<ast.Tuple object at 0x7da18f722e30> assign[=] call[name[p].set_image_options, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18f722890>]]
<ast.Tuple object at 0x7da18f720730> assign[=] name[args]
variable[layout] assign[=] call[name[F4ALayout], parameter[name[layout]]]
variable[gs] assign[=] name[opts].gauge_step
variable[fig] assign[=] call[name[plt].figure, parameter[constant[1], tuple[[<ast.Attribute object at 0x7da18f723b50>, <ast.Attribute object at 0x7da18f720910>]]]]
variable[root] assign[=] call[name[fig].add_axes, parameter[list[[<ast.Constant object at 0x7da18f720250>, <ast.Constant object at 0x7da18f720e20>, <ast.Constant object at 0x7da18f720f10>, <ast.Constant object at 0x7da18f723a90>]]]]
<ast.Tuple object at 0x7da18f722050> assign[=] tuple[[<ast.Constant object at 0x7da18f7232e0>, <ast.Constant object at 0x7da18f722320>, <ast.Constant object at 0x7da18f722e00>]]
variable[s] assign[=] call[name[Synteny], parameter[name[fig], name[root], name[block], name[napusbed], name[slayout]]]
variable[synteny_exts] assign[=] <ast.ListComp object at 0x7da18f722650>
variable[h] assign[=] constant[0.1]
variable[order] assign[=] call[constant[bzh,yudal].split, parameter[constant[,]]]
variable[labels] assign[=] tuple[[<ast.Constant object at 0x7da18f722920>, <ast.Constant object at 0x7da18f721450>, <ast.Constant object at 0x7da18f720ac0>, <ast.Constant object at 0x7da18f723220>]]
for taget[name[t]] in starred[name[layout]] begin[:]
<ast.Tuple object at 0x7da18f721300> assign[=] call[name[synteny_exts]][binary_operation[constant[2] * name[t].i]]
variable[canvas] assign[=] list[[<ast.Name object at 0x7da18f7219f0>, <ast.Attribute object at 0x7da18f723400>, <ast.BinOp object at 0x7da18f723ca0>, <ast.Name object at 0x7da18f721a20>]]
call[name[root].text, parameter[binary_operation[name[xstart] - name[h]], binary_operation[name[t].y + binary_operation[name[h] / constant[2]]], call[name[labels]][name[t].i]]]
<ast.Tuple object at 0x7da18f7217e0> assign[=] call[name[t].box_region.split, parameter[constant[:]]]
<ast.Tuple object at 0x7da18f723a30> assign[=] call[name[ab].split, parameter[constant[-]]]
variable[vlines] assign[=] <ast.ListComp object at 0x7da18f7203d0>
call[name[Coverage], parameter[name[fig], name[root], name[canvas], name[t].seqid, tuple[[<ast.Attribute object at 0x7da18f723700>, <ast.Attribute object at 0x7da18f723cd0>]], name[datadir]]]
<ast.Tuple object at 0x7da18f723df0> assign[=] tuple[[<ast.Tuple object at 0x7da18f722260>, <ast.Tuple object at 0x7da18f720eb0>]]
for taget[name[gid]] in starred[tuple[[<ast.Name object at 0x7da18f720d30>, <ast.Name object at 0x7da18f723c70>]]] begin[:]
<ast.Tuple object at 0x7da18f721b70> assign[=] call[name[s].gg][name[gid]]
<ast.Tuple object at 0x7da18f722aa0> assign[=] name[start]
<ast.Tuple object at 0x7da20e955e10> assign[=] name[end]
variable[x] assign[=] binary_operation[binary_operation[name[xstart] + name[xend]] / constant[2]]
variable[arrow] assign[=] call[name[FancyArrowPatch], parameter[]]
call[name[root].add_patch, parameter[name[arrow]]]
call[name[root].set_xlim, parameter[constant[0], constant[1]]]
call[name[root].set_ylim, parameter[constant[0], constant[1]]]
call[name[root].set_axis_off, parameter[]]
variable[image_name] assign[=] binary_operation[constant[napus-fig4.] + name[iopts].format]
call[name[savefig], parameter[name[image_name]]] | keyword[def] identifier[fig4] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[fig4] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[int] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] , identifier[iopts] = identifier[p] . identifier[set_image_options] ( identifier[args] , identifier[figsize] = literal[string] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[layout] , identifier[datadir] = identifier[args]
identifier[layout] = identifier[F4ALayout] ( identifier[layout] , identifier[datadir] = identifier[datadir] )
identifier[gs] = identifier[opts] . identifier[gauge_step]
identifier[fig] = identifier[plt] . identifier[figure] ( literal[int] ,( identifier[iopts] . identifier[w] , identifier[iopts] . identifier[h] ))
identifier[root] = identifier[fig] . identifier[add_axes] ([ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[block] , identifier[napusbed] , identifier[slayout] = literal[string] , literal[string] , literal[string]
identifier[s] = identifier[Synteny] ( identifier[fig] , identifier[root] , identifier[block] , identifier[napusbed] , identifier[slayout] , identifier[chr_label] = keyword[False] )
identifier[synteny_exts] =[( identifier[x] . identifier[xstart] , identifier[x] . identifier[xend] ) keyword[for] identifier[x] keyword[in] identifier[s] . identifier[rr] ]
identifier[h] = literal[int]
identifier[order] = literal[string] . identifier[split] ( literal[string] )
identifier[labels] =( literal[string] ,
literal[string] ,
literal[string] ,
literal[string] )
keyword[for] identifier[t] keyword[in] identifier[layout] :
identifier[xstart] , identifier[xend] = identifier[synteny_exts] [ literal[int] * identifier[t] . identifier[i] ]
identifier[canvas] =[ identifier[xstart] , identifier[t] . identifier[y] , identifier[xend] - identifier[xstart] , identifier[h] ]
identifier[root] . identifier[text] ( identifier[xstart] - identifier[h] , identifier[t] . identifier[y] + identifier[h] / literal[int] , identifier[labels] [ identifier[t] . identifier[i] ], identifier[ha] = literal[string] , identifier[va] = literal[string] )
identifier[ch] , identifier[ab] = identifier[t] . identifier[box_region] . identifier[split] ( literal[string] )
identifier[a] , identifier[b] = identifier[ab] . identifier[split] ( literal[string] )
identifier[vlines] =[ identifier[int] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[a] , identifier[b] )]
identifier[Coverage] ( identifier[fig] , identifier[root] , identifier[canvas] , identifier[t] . identifier[seqid] ,( identifier[t] . identifier[start] , identifier[t] . identifier[end] ), identifier[datadir] ,
identifier[order] = identifier[order] , identifier[gauge] = literal[string] , identifier[plot_chr_label] = keyword[False] ,
identifier[gauge_step] = identifier[gs] , identifier[palette] = literal[string] ,
identifier[cap] = literal[int] , identifier[hlsuffix] = literal[string] ,
identifier[vlines] = identifier[vlines] )
identifier[a] , identifier[b] =( literal[int] , literal[string] ),( literal[int] , literal[string] )
keyword[for] identifier[gid] keyword[in] ( identifier[a] , identifier[b] ):
identifier[start] , identifier[end] = identifier[s] . identifier[gg] [ identifier[gid] ]
identifier[xstart] , identifier[ystart] = identifier[start]
identifier[xend] , identifier[yend] = identifier[end]
identifier[x] =( identifier[xstart] + identifier[xend] )/ literal[int]
identifier[arrow] = identifier[FancyArrowPatch] ( identifier[posA] =( identifier[x] , identifier[ystart] - literal[int] ),
identifier[posB] =( identifier[x] , identifier[ystart] - literal[int] ),
identifier[arrowstyle] = literal[string] ,
identifier[lw] = literal[int] , identifier[fc] = literal[string] , identifier[ec] = literal[string] , identifier[zorder] = literal[int] )
identifier[root] . identifier[add_patch] ( identifier[arrow] )
identifier[root] . identifier[set_xlim] ( literal[int] , literal[int] )
identifier[root] . identifier[set_ylim] ( literal[int] , literal[int] )
identifier[root] . identifier[set_axis_off] ()
identifier[image_name] = literal[string] + identifier[iopts] . identifier[format]
identifier[savefig] ( identifier[image_name] , identifier[dpi] = identifier[iopts] . identifier[dpi] , identifier[iopts] = identifier[iopts] ) | def fig4(args):
"""
%prog fig4 layout data
Napus Figure 4A displays an example deleted region for quartet chromosomes,
showing read alignments from high GL and low GL lines.
"""
p = OptionParser(fig4.__doc__)
p.add_option('--gauge_step', default=200000, type='int', help='Step size for the base scale')
(opts, args, iopts) = p.set_image_options(args, figsize='9x7')
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(layout, datadir) = args
layout = F4ALayout(layout, datadir=datadir)
gs = opts.gauge_step
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
(block, napusbed, slayout) = ('r28.txt', 'all.bed', 'r28.layout')
s = Synteny(fig, root, block, napusbed, slayout, chr_label=False)
synteny_exts = [(x.xstart, x.xend) for x in s.rr]
h = 0.1
order = 'bzh,yudal'.split(',')
labels = ('\\textit{B. napus} A$\\mathsf{_n}$2', '\\textit{B. rapa} A$\\mathsf{_r}$2', '\\textit{B. oleracea} C$\\mathsf{_o}$2', '\\textit{B. napus} C$\\mathsf{_n}$2')
for t in layout:
(xstart, xend) = synteny_exts[2 * t.i]
canvas = [xstart, t.y, xend - xstart, h]
root.text(xstart - h, t.y + h / 2, labels[t.i], ha='center', va='center')
(ch, ab) = t.box_region.split(':')
(a, b) = ab.split('-')
vlines = [int(x) for x in (a, b)]
Coverage(fig, root, canvas, t.seqid, (t.start, t.end), datadir, order=order, gauge='top', plot_chr_label=False, gauge_step=gs, palette='gray', cap=40, hlsuffix='regions.forhaibao', vlines=vlines) # depends on [control=['for'], data=['t']]
# Highlight GSL biosynthesis genes
(a, b) = ((3, 'Bra029311'), (5, 'Bo2g161590'))
for gid in (a, b):
(start, end) = s.gg[gid]
(xstart, ystart) = start
(xend, yend) = end
x = (xstart + xend) / 2
arrow = FancyArrowPatch(posA=(x, ystart - 0.04), posB=(x, ystart - 0.005), arrowstyle='fancy,head_width=6,head_length=8', lw=3, fc='k', ec='k', zorder=20)
root.add_patch(arrow) # depends on [control=['for'], data=['gid']]
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = 'napus-fig4.' + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) |
def _build_install_command_list(cmd_prefix, to_install, to_downgrade, to_reinstall):
'''
Builds a list of install commands to be executed in sequence in order to process
each of the to_install, to_downgrade, and to_reinstall lists.
'''
cmds = []
if to_install:
cmd = copy.deepcopy(cmd_prefix)
cmd.extend(to_install)
cmds.append(cmd)
if to_downgrade:
cmd = copy.deepcopy(cmd_prefix)
cmd.append('--force-downgrade')
cmd.extend(to_downgrade)
cmds.append(cmd)
if to_reinstall:
cmd = copy.deepcopy(cmd_prefix)
cmd.append('--force-reinstall')
cmd.extend(to_reinstall)
cmds.append(cmd)
return cmds | def function[_build_install_command_list, parameter[cmd_prefix, to_install, to_downgrade, to_reinstall]]:
constant[
Builds a list of install commands to be executed in sequence in order to process
each of the to_install, to_downgrade, and to_reinstall lists.
]
variable[cmds] assign[=] list[[]]
if name[to_install] begin[:]
variable[cmd] assign[=] call[name[copy].deepcopy, parameter[name[cmd_prefix]]]
call[name[cmd].extend, parameter[name[to_install]]]
call[name[cmds].append, parameter[name[cmd]]]
if name[to_downgrade] begin[:]
variable[cmd] assign[=] call[name[copy].deepcopy, parameter[name[cmd_prefix]]]
call[name[cmd].append, parameter[constant[--force-downgrade]]]
call[name[cmd].extend, parameter[name[to_downgrade]]]
call[name[cmds].append, parameter[name[cmd]]]
if name[to_reinstall] begin[:]
variable[cmd] assign[=] call[name[copy].deepcopy, parameter[name[cmd_prefix]]]
call[name[cmd].append, parameter[constant[--force-reinstall]]]
call[name[cmd].extend, parameter[name[to_reinstall]]]
call[name[cmds].append, parameter[name[cmd]]]
return[name[cmds]] | keyword[def] identifier[_build_install_command_list] ( identifier[cmd_prefix] , identifier[to_install] , identifier[to_downgrade] , identifier[to_reinstall] ):
literal[string]
identifier[cmds] =[]
keyword[if] identifier[to_install] :
identifier[cmd] = identifier[copy] . identifier[deepcopy] ( identifier[cmd_prefix] )
identifier[cmd] . identifier[extend] ( identifier[to_install] )
identifier[cmds] . identifier[append] ( identifier[cmd] )
keyword[if] identifier[to_downgrade] :
identifier[cmd] = identifier[copy] . identifier[deepcopy] ( identifier[cmd_prefix] )
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] . identifier[extend] ( identifier[to_downgrade] )
identifier[cmds] . identifier[append] ( identifier[cmd] )
keyword[if] identifier[to_reinstall] :
identifier[cmd] = identifier[copy] . identifier[deepcopy] ( identifier[cmd_prefix] )
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] . identifier[extend] ( identifier[to_reinstall] )
identifier[cmds] . identifier[append] ( identifier[cmd] )
keyword[return] identifier[cmds] | def _build_install_command_list(cmd_prefix, to_install, to_downgrade, to_reinstall):
"""
Builds a list of install commands to be executed in sequence in order to process
each of the to_install, to_downgrade, and to_reinstall lists.
"""
cmds = []
if to_install:
cmd = copy.deepcopy(cmd_prefix)
cmd.extend(to_install)
cmds.append(cmd) # depends on [control=['if'], data=[]]
if to_downgrade:
cmd = copy.deepcopy(cmd_prefix)
cmd.append('--force-downgrade')
cmd.extend(to_downgrade)
cmds.append(cmd) # depends on [control=['if'], data=[]]
if to_reinstall:
cmd = copy.deepcopy(cmd_prefix)
cmd.append('--force-reinstall')
cmd.extend(to_reinstall)
cmds.append(cmd) # depends on [control=['if'], data=[]]
return cmds |
def save_email(self, list_name, email_msg, index):
"""Save email message into the database."""
msg_id = email_msg.get('Message-ID')
if not msg_id:
return
# Update last imported message into the DB
mailinglist, created = MailingList.objects.get_or_create(
name=list_name
)
mailinglist.last_imported_index = index
if created:
# if the mailinglist is newly created it's sure that the message
# is not in the DB yet.
self.create_email(mailinglist, email_msg)
else:
# If the message is already at the database don't do anything
try:
Message.all_objects.get(
message_id=msg_id,
thread__mailinglist=mailinglist
)
except Message.DoesNotExist:
self.create_email(mailinglist, email_msg)
mailinglist.save() | def function[save_email, parameter[self, list_name, email_msg, index]]:
constant[Save email message into the database.]
variable[msg_id] assign[=] call[name[email_msg].get, parameter[constant[Message-ID]]]
if <ast.UnaryOp object at 0x7da2054a7f40> begin[:]
return[None]
<ast.Tuple object at 0x7da2054a6830> assign[=] call[name[MailingList].objects.get_or_create, parameter[]]
name[mailinglist].last_imported_index assign[=] name[index]
if name[created] begin[:]
call[name[self].create_email, parameter[name[mailinglist], name[email_msg]]]
call[name[mailinglist].save, parameter[]] | keyword[def] identifier[save_email] ( identifier[self] , identifier[list_name] , identifier[email_msg] , identifier[index] ):
literal[string]
identifier[msg_id] = identifier[email_msg] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[msg_id] :
keyword[return]
identifier[mailinglist] , identifier[created] = identifier[MailingList] . identifier[objects] . identifier[get_or_create] (
identifier[name] = identifier[list_name]
)
identifier[mailinglist] . identifier[last_imported_index] = identifier[index]
keyword[if] identifier[created] :
identifier[self] . identifier[create_email] ( identifier[mailinglist] , identifier[email_msg] )
keyword[else] :
keyword[try] :
identifier[Message] . identifier[all_objects] . identifier[get] (
identifier[message_id] = identifier[msg_id] ,
identifier[thread__mailinglist] = identifier[mailinglist]
)
keyword[except] identifier[Message] . identifier[DoesNotExist] :
identifier[self] . identifier[create_email] ( identifier[mailinglist] , identifier[email_msg] )
identifier[mailinglist] . identifier[save] () | def save_email(self, list_name, email_msg, index):
"""Save email message into the database."""
msg_id = email_msg.get('Message-ID')
if not msg_id:
return # depends on [control=['if'], data=[]]
# Update last imported message into the DB
(mailinglist, created) = MailingList.objects.get_or_create(name=list_name)
mailinglist.last_imported_index = index
if created:
# if the mailinglist is newly created it's sure that the message
# is not in the DB yet.
self.create_email(mailinglist, email_msg) # depends on [control=['if'], data=[]]
else:
# If the message is already at the database don't do anything
try:
Message.all_objects.get(message_id=msg_id, thread__mailinglist=mailinglist) # depends on [control=['try'], data=[]]
except Message.DoesNotExist:
self.create_email(mailinglist, email_msg) # depends on [control=['except'], data=[]]
mailinglist.save() |
def get_all_groups(self, start=0, limit=1000):
"""
Get all groups from Confluence User management
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of groups to return, this may be restricted by
fixed system limits. Default: 1000
:return:
"""
url = 'rest/api/group?limit={limit}&start={start}'.format(limit=limit,
start=start)
return (self.get(url) or {}).get('results') | def function[get_all_groups, parameter[self, start, limit]]:
constant[
Get all groups from Confluence User management
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of groups to return, this may be restricted by
fixed system limits. Default: 1000
:return:
]
variable[url] assign[=] call[constant[rest/api/group?limit={limit}&start={start}].format, parameter[]]
return[call[<ast.BoolOp object at 0x7da18f722950>.get, parameter[constant[results]]]] | keyword[def] identifier[get_all_groups] ( identifier[self] , identifier[start] = literal[int] , identifier[limit] = literal[int] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[limit] = identifier[limit] ,
identifier[start] = identifier[start] )
keyword[return] ( identifier[self] . identifier[get] ( identifier[url] ) keyword[or] {}). identifier[get] ( literal[string] ) | def get_all_groups(self, start=0, limit=1000):
"""
Get all groups from Confluence User management
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of groups to return, this may be restricted by
fixed system limits. Default: 1000
:return:
"""
url = 'rest/api/group?limit={limit}&start={start}'.format(limit=limit, start=start)
return (self.get(url) or {}).get('results') |
def get_qualification_score(self, qualification_id, worker_id):
"""Return a worker's qualification score as an iteger.
"""
try:
response = self.mturk.get_qualification_score(
QualificationTypeId=qualification_id, WorkerId=worker_id
)
except ClientError as ex:
error = str(ex)
if "does not exist" in error:
raise WorkerLacksQualification(
"Worker {} does not have qualification {}.".format(
worker_id, qualification_id
)
)
if "operation can be called with a status of: Granted" in error:
raise RevokedQualification(
"Worker {} has had qualification {} revoked.".format(
worker_id, qualification_id
)
)
raise MTurkServiceException(error)
return response["Qualification"]["IntegerValue"] | def function[get_qualification_score, parameter[self, qualification_id, worker_id]]:
constant[Return a worker's qualification score as an iteger.
]
<ast.Try object at 0x7da18ede49d0>
return[call[call[name[response]][constant[Qualification]]][constant[IntegerValue]]] | keyword[def] identifier[get_qualification_score] ( identifier[self] , identifier[qualification_id] , identifier[worker_id] ):
literal[string]
keyword[try] :
identifier[response] = identifier[self] . identifier[mturk] . identifier[get_qualification_score] (
identifier[QualificationTypeId] = identifier[qualification_id] , identifier[WorkerId] = identifier[worker_id]
)
keyword[except] identifier[ClientError] keyword[as] identifier[ex] :
identifier[error] = identifier[str] ( identifier[ex] )
keyword[if] literal[string] keyword[in] identifier[error] :
keyword[raise] identifier[WorkerLacksQualification] (
literal[string] . identifier[format] (
identifier[worker_id] , identifier[qualification_id]
)
)
keyword[if] literal[string] keyword[in] identifier[error] :
keyword[raise] identifier[RevokedQualification] (
literal[string] . identifier[format] (
identifier[worker_id] , identifier[qualification_id]
)
)
keyword[raise] identifier[MTurkServiceException] ( identifier[error] )
keyword[return] identifier[response] [ literal[string] ][ literal[string] ] | def get_qualification_score(self, qualification_id, worker_id):
"""Return a worker's qualification score as an iteger.
"""
try:
response = self.mturk.get_qualification_score(QualificationTypeId=qualification_id, WorkerId=worker_id) # depends on [control=['try'], data=[]]
except ClientError as ex:
error = str(ex)
if 'does not exist' in error:
raise WorkerLacksQualification('Worker {} does not have qualification {}.'.format(worker_id, qualification_id)) # depends on [control=['if'], data=[]]
if 'operation can be called with a status of: Granted' in error:
raise RevokedQualification('Worker {} has had qualification {} revoked.'.format(worker_id, qualification_id)) # depends on [control=['if'], data=[]]
raise MTurkServiceException(error) # depends on [control=['except'], data=['ex']]
return response['Qualification']['IntegerValue'] |
def get_ranks(values):
'''
Converts raw values into ranks for rank correlation coefficients
:param values: list of values (int/float)
:return: a dict mapping value -> rank
'''
ranks = {}
sorted_values = sorted(values)
for i in range(len(sorted_values)):
value = sorted_values[i]
if value not in ranks:
ranks[value] = i + 1
return ranks | def function[get_ranks, parameter[values]]:
constant[
Converts raw values into ranks for rank correlation coefficients
:param values: list of values (int/float)
:return: a dict mapping value -> rank
]
variable[ranks] assign[=] dictionary[[], []]
variable[sorted_values] assign[=] call[name[sorted], parameter[name[values]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[sorted_values]]]]]] begin[:]
variable[value] assign[=] call[name[sorted_values]][name[i]]
if compare[name[value] <ast.NotIn object at 0x7da2590d7190> name[ranks]] begin[:]
call[name[ranks]][name[value]] assign[=] binary_operation[name[i] + constant[1]]
return[name[ranks]] | keyword[def] identifier[get_ranks] ( identifier[values] ):
literal[string]
identifier[ranks] ={}
identifier[sorted_values] = identifier[sorted] ( identifier[values] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sorted_values] )):
identifier[value] = identifier[sorted_values] [ identifier[i] ]
keyword[if] identifier[value] keyword[not] keyword[in] identifier[ranks] :
identifier[ranks] [ identifier[value] ]= identifier[i] + literal[int]
keyword[return] identifier[ranks] | def get_ranks(values):
"""
Converts raw values into ranks for rank correlation coefficients
:param values: list of values (int/float)
:return: a dict mapping value -> rank
"""
ranks = {}
sorted_values = sorted(values)
for i in range(len(sorted_values)):
value = sorted_values[i]
if value not in ranks:
ranks[value] = i + 1 # depends on [control=['if'], data=['value', 'ranks']] # depends on [control=['for'], data=['i']]
return ranks |
def replace_strings_in_list(array_of_strigs, replace_with_strings):
"A value in replace_with_strings can be either single string or list of strings"
potentially_nested_list = [replace_with_strings.get(s) or s for s in array_of_strigs]
return list(flatten(potentially_nested_list)) | def function[replace_strings_in_list, parameter[array_of_strigs, replace_with_strings]]:
constant[A value in replace_with_strings can be either single string or list of strings]
variable[potentially_nested_list] assign[=] <ast.ListComp object at 0x7da1b20309a0>
return[call[name[list], parameter[call[name[flatten], parameter[name[potentially_nested_list]]]]]] | keyword[def] identifier[replace_strings_in_list] ( identifier[array_of_strigs] , identifier[replace_with_strings] ):
literal[string]
identifier[potentially_nested_list] =[ identifier[replace_with_strings] . identifier[get] ( identifier[s] ) keyword[or] identifier[s] keyword[for] identifier[s] keyword[in] identifier[array_of_strigs] ]
keyword[return] identifier[list] ( identifier[flatten] ( identifier[potentially_nested_list] )) | def replace_strings_in_list(array_of_strigs, replace_with_strings):
"""A value in replace_with_strings can be either single string or list of strings"""
potentially_nested_list = [replace_with_strings.get(s) or s for s in array_of_strigs]
return list(flatten(potentially_nested_list)) |
def upgrade_websocket(self, environ, start_response):
    """
    Attempt to upgrade the socket environ['wsgi.input'] into a websocket enabled connection.

    Performs the server side of the RFC 6455 opening handshake: validates
    the client's version and key headers, computes the Sec-WebSocket-Accept
    token and replies with ``101 Switching Protocols``.

    :param environ: WSGI environment of the upgrade request.
    :param start_response: WSGI ``start_response`` callable.
    :return: a ``WebSocket`` wrapping the raw ``wsgi.input`` stream.
    :raises UpgradeRequiredError: if no WebSocket version header is present.
    :raises HandshakeError: if the version is unsupported or the key is invalid.
    """
    websocket_version = environ.get('HTTP_SEC_WEBSOCKET_VERSION', '')
    if not websocket_version:
        raise UpgradeRequiredError
    elif websocket_version not in self.WS_VERSIONS:
        raise HandshakeError('Unsupported WebSocket Version: {0}'.format(websocket_version))
    key = environ.get('HTTP_SEC_WEBSOCKET_KEY', '').strip()
    if not key:
        raise HandshakeError('Sec-WebSocket-Key header is missing/empty')
    try:
        key_len = len(base64.b64decode(key))
    except (TypeError, ValueError):
        # Python 2 raises TypeError for malformed base64; Python 3 raises
        # binascii.Error, which is a ValueError subclass — catch both so a
        # bad key always becomes a HandshakeError instead of a crash.
        raise HandshakeError('Invalid key: {0}'.format(key))
    if key_len != 16:
        # 5.2.1 (3): the decoded key must be exactly 16 bytes long.
        raise HandshakeError('Invalid key: {0}'.format(key))
    # Accept token per RFC 6455: base64(SHA1(key + magic GUID)).
    sec_ws_accept = base64.b64encode(sha1(six.b(key) + self.WS_GUID).digest())
    if six.PY3:
        sec_ws_accept = sec_ws_accept.decode('ascii')
    headers = [
        ('Upgrade', 'websocket'),
        ('Connection', 'Upgrade'),
        ('Sec-WebSocket-Accept', sec_ws_accept),
        ('Sec-WebSocket-Version', str(websocket_version))
    ]
    if environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL') is not None:
        # Echo the requested subprotocol back to the client unchanged.
        headers.append(('Sec-WebSocket-Protocol', environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL')))
    logger.debug('WebSocket request accepted, switching protocols')
    start_response(force_str('101 Switching Protocols'), headers)
    six.get_method_self(start_response).finish_content()
    return WebSocket(environ['wsgi.input'])
constant[
Attempt to upgrade the socket environ['wsgi.input'] into a websocket enabled connection.
]
variable[websocket_version] assign[=] call[name[environ].get, parameter[constant[HTTP_SEC_WEBSOCKET_VERSION], constant[]]]
if <ast.UnaryOp object at 0x7da20c993340> begin[:]
<ast.Raise object at 0x7da20c991e40>
variable[key] assign[=] call[call[name[environ].get, parameter[constant[HTTP_SEC_WEBSOCKET_KEY], constant[]]].strip, parameter[]]
if <ast.UnaryOp object at 0x7da20c9936a0> begin[:]
<ast.Raise object at 0x7da20c993010>
<ast.Try object at 0x7da20c991fc0>
if compare[name[key_len] not_equal[!=] constant[16]] begin[:]
<ast.Raise object at 0x7da20c992980>
variable[sec_ws_accept] assign[=] call[name[base64].b64encode, parameter[call[call[name[sha1], parameter[binary_operation[call[name[six].b, parameter[name[key]]] + name[self].WS_GUID]]].digest, parameter[]]]]
if name[six].PY3 begin[:]
variable[sec_ws_accept] assign[=] call[name[sec_ws_accept].decode, parameter[constant[ascii]]]
variable[headers] assign[=] list[[<ast.Tuple object at 0x7da20c993ee0>, <ast.Tuple object at 0x7da20c990d60>, <ast.Tuple object at 0x7da2041d9f30>, <ast.Tuple object at 0x7da2041da830>]]
if compare[call[name[environ].get, parameter[constant[HTTP_SEC_WEBSOCKET_PROTOCOL]]] is_not constant[None]] begin[:]
call[name[headers].append, parameter[tuple[[<ast.Constant object at 0x7da20c993b80>, <ast.Call object at 0x7da20c993b20>]]]]
call[name[logger].debug, parameter[constant[WebSocket request accepted, switching protocols]]]
call[name[start_response], parameter[call[name[force_str], parameter[constant[101 Switching Protocols]]], name[headers]]]
call[call[name[six].get_method_self, parameter[name[start_response]]].finish_content, parameter[]]
return[call[name[WebSocket], parameter[call[name[environ]][constant[wsgi.input]]]]] | keyword[def] identifier[upgrade_websocket] ( identifier[self] , identifier[environ] , identifier[start_response] ):
literal[string]
identifier[websocket_version] = identifier[environ] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[websocket_version] :
keyword[raise] identifier[UpgradeRequiredError]
keyword[elif] identifier[websocket_version] keyword[not] keyword[in] identifier[self] . identifier[WS_VERSIONS] :
keyword[raise] identifier[HandshakeError] ( literal[string] . identifier[format] ( identifier[websocket_version] ))
identifier[key] = identifier[environ] . identifier[get] ( literal[string] , literal[string] ). identifier[strip] ()
keyword[if] keyword[not] identifier[key] :
keyword[raise] identifier[HandshakeError] ( literal[string] )
keyword[try] :
identifier[key_len] = identifier[len] ( identifier[base64] . identifier[b64decode] ( identifier[key] ))
keyword[except] identifier[TypeError] :
keyword[raise] identifier[HandshakeError] ( literal[string] . identifier[format] ( identifier[key] ))
keyword[if] identifier[key_len] != literal[int] :
keyword[raise] identifier[HandshakeError] ( literal[string] . identifier[format] ( identifier[key] ))
identifier[sec_ws_accept] = identifier[base64] . identifier[b64encode] ( identifier[sha1] ( identifier[six] . identifier[b] ( identifier[key] )+ identifier[self] . identifier[WS_GUID] ). identifier[digest] ())
keyword[if] identifier[six] . identifier[PY3] :
identifier[sec_ws_accept] = identifier[sec_ws_accept] . identifier[decode] ( literal[string] )
identifier[headers] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , identifier[sec_ws_accept] ),
( literal[string] , identifier[str] ( identifier[websocket_version] ))
]
keyword[if] identifier[environ] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[headers] . identifier[append] (( literal[string] , identifier[environ] . identifier[get] ( literal[string] )))
identifier[logger] . identifier[debug] ( literal[string] )
identifier[start_response] ( identifier[force_str] ( literal[string] ), identifier[headers] )
identifier[six] . identifier[get_method_self] ( identifier[start_response] ). identifier[finish_content] ()
keyword[return] identifier[WebSocket] ( identifier[environ] [ literal[string] ]) | def upgrade_websocket(self, environ, start_response):
"""
Attempt to upgrade the socket environ['wsgi.input'] into a websocket enabled connection.
"""
websocket_version = environ.get('HTTP_SEC_WEBSOCKET_VERSION', '')
if not websocket_version:
raise UpgradeRequiredError # depends on [control=['if'], data=[]]
elif websocket_version not in self.WS_VERSIONS:
raise HandshakeError('Unsupported WebSocket Version: {0}'.format(websocket_version)) # depends on [control=['if'], data=['websocket_version']]
key = environ.get('HTTP_SEC_WEBSOCKET_KEY', '').strip()
if not key:
raise HandshakeError('Sec-WebSocket-Key header is missing/empty') # depends on [control=['if'], data=[]]
try:
key_len = len(base64.b64decode(key)) # depends on [control=['try'], data=[]]
except TypeError:
raise HandshakeError('Invalid key: {0}'.format(key)) # depends on [control=['except'], data=[]]
if key_len != 16:
# 5.2.1 (3)
raise HandshakeError('Invalid key: {0}'.format(key)) # depends on [control=['if'], data=[]]
sec_ws_accept = base64.b64encode(sha1(six.b(key) + self.WS_GUID).digest())
if six.PY3:
sec_ws_accept = sec_ws_accept.decode('ascii') # depends on [control=['if'], data=[]]
headers = [('Upgrade', 'websocket'), ('Connection', 'Upgrade'), ('Sec-WebSocket-Accept', sec_ws_accept), ('Sec-WebSocket-Version', str(websocket_version))]
if environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL') is not None:
headers.append(('Sec-WebSocket-Protocol', environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL'))) # depends on [control=['if'], data=[]]
logger.debug('WebSocket request accepted, switching protocols')
start_response(force_str('101 Switching Protocols'), headers)
six.get_method_self(start_response).finish_content()
return WebSocket(environ['wsgi.input']) |
def rgba_bytes_tuple(self, x):
    """Return the color for value `x` as a tuple (R, G, B, A) of ints in 0..255.

    Each float channel from ``rgba_floats_tuple`` is scaled by 255.9999 and
    truncated, so a channel value of 1.0 maps to 255 without overflowing.
    """
    scaled = [int(channel * 255.9999) for channel in self.rgba_floats_tuple(x)]
    return tuple(scaled)
constant[Provides the color corresponding to value `x` in the
form of a tuple (R,G,B,A) with int values between 0 and 255.
]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1267a60>]]] | keyword[def] identifier[rgba_bytes_tuple] ( identifier[self] , identifier[x] ):
literal[string]
keyword[return] identifier[tuple] ( identifier[int] ( identifier[u] * literal[int] ) keyword[for] identifier[u] keyword[in] identifier[self] . identifier[rgba_floats_tuple] ( identifier[x] )) | def rgba_bytes_tuple(self, x):
"""Provides the color corresponding to value `x` in the
form of a tuple (R,G,B,A) with int values between 0 and 255.
"""
return tuple((int(u * 255.9999) for u in self.rgba_floats_tuple(x))) |
def consume(topic, conf):
    """
    Consume User records

    Polls Avro-encoded messages from ``topic`` in an endless loop until
    interrupted with ^C, deserializing each value against the module-level
    ``record_schema`` and printing the fields of the resulting ``User``.

    :param topic: Kafka topic name to subscribe to.
    :param conf: consumer configuration dict; must contain ``group.id``.
    """
    # Local imports keep the confluent_kafka dependency optional until this
    # consumer entry point is actually invoked.
    from confluent_kafka.avro import AvroConsumer
    from confluent_kafka.avro.serializer import SerializerError
    print("Consuming user records from topic {} with group {}. ^c to exit.".format(topic, conf["group.id"]))
    # reader_value_schema pins the schema used for decoding message values;
    # record_schema and User are defined elsewhere in this module.
    c = AvroConsumer(conf, reader_value_schema=record_schema)
    c.subscribe([topic])
    while True:
        try:
            # 1-second poll timeout so KeyboardInterrupt is handled promptly.
            msg = c.poll(1)
            # There were no messages on the queue, continue polling
            if msg is None:
                continue
            if msg.error():
                # Broker/consumer-level error: report and keep polling.
                print("Consumer error: {}".format(msg.error()))
                continue
            record = User(msg.value())
            print("name: {}\n\tfavorite_number: {}\n\tfavorite_color: {}\n".format(
                record.name, record.favorite_number, record.favorite_color))
        except SerializerError as e:
            # Report malformed record, discard results, continue polling
            print("Message deserialization failed {}".format(e))
            continue
        except KeyboardInterrupt:
            # ^C: leave the poll loop and shut down cleanly below.
            break
    print("Shutting down consumer..")
    # close() commits offsets (per configuration) and leaves the group.
    c.close()
constant[
Consume User records
]
from relative_module[confluent_kafka.avro] import module[AvroConsumer]
from relative_module[confluent_kafka.avro.serializer] import module[SerializerError]
call[name[print], parameter[call[constant[Consuming user records from topic {} with group {}. ^c to exit.].format, parameter[name[topic], call[name[conf]][constant[group.id]]]]]]
variable[c] assign[=] call[name[AvroConsumer], parameter[name[conf]]]
call[name[c].subscribe, parameter[list[[<ast.Name object at 0x7da20e9b27d0>]]]]
while constant[True] begin[:]
<ast.Try object at 0x7da237eee740>
call[name[print], parameter[constant[Shutting down consumer..]]]
call[name[c].close, parameter[]] | keyword[def] identifier[consume] ( identifier[topic] , identifier[conf] ):
literal[string]
keyword[from] identifier[confluent_kafka] . identifier[avro] keyword[import] identifier[AvroConsumer]
keyword[from] identifier[confluent_kafka] . identifier[avro] . identifier[serializer] keyword[import] identifier[SerializerError]
identifier[print] ( literal[string] . identifier[format] ( identifier[topic] , identifier[conf] [ literal[string] ]))
identifier[c] = identifier[AvroConsumer] ( identifier[conf] , identifier[reader_value_schema] = identifier[record_schema] )
identifier[c] . identifier[subscribe] ([ identifier[topic] ])
keyword[while] keyword[True] :
keyword[try] :
identifier[msg] = identifier[c] . identifier[poll] ( literal[int] )
keyword[if] identifier[msg] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[msg] . identifier[error] ():
identifier[print] ( literal[string] . identifier[format] ( identifier[msg] . identifier[error] ()))
keyword[continue]
identifier[record] = identifier[User] ( identifier[msg] . identifier[value] ())
identifier[print] ( literal[string] . identifier[format] (
identifier[record] . identifier[name] , identifier[record] . identifier[favorite_number] , identifier[record] . identifier[favorite_color] ))
keyword[except] identifier[SerializerError] keyword[as] identifier[e] :
identifier[print] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[continue]
keyword[except] identifier[KeyboardInterrupt] :
keyword[break]
identifier[print] ( literal[string] )
identifier[c] . identifier[close] () | def consume(topic, conf):
"""
Consume User records
"""
from confluent_kafka.avro import AvroConsumer
from confluent_kafka.avro.serializer import SerializerError
print('Consuming user records from topic {} with group {}. ^c to exit.'.format(topic, conf['group.id']))
c = AvroConsumer(conf, reader_value_schema=record_schema)
c.subscribe([topic])
while True:
try:
msg = c.poll(1)
# There were no messages on the queue, continue polling
if msg is None:
continue # depends on [control=['if'], data=[]]
if msg.error():
print('Consumer error: {}'.format(msg.error()))
continue # depends on [control=['if'], data=[]]
record = User(msg.value())
print('name: {}\n\tfavorite_number: {}\n\tfavorite_color: {}\n'.format(record.name, record.favorite_number, record.favorite_color)) # depends on [control=['try'], data=[]]
except SerializerError as e:
# Report malformed record, discard results, continue polling
print('Message deserialization failed {}'.format(e))
continue # depends on [control=['except'], data=['e']]
except KeyboardInterrupt:
break # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
print('Shutting down consumer..')
c.close() |
def save(self, filename):
    '''save fence points to a file

    Each point in ``self.points`` is written as one tab-separated
    ``lat<TAB>lng`` line, formatted with ``%f`` (six decimal places).

    :param filename: path of the file to create or overwrite.
    '''
    # Context manager guarantees the file is closed even if a write fails,
    # unlike the previous explicit open()/close() pair.
    with open(filename, mode='w') as f:
        for p in self.points:
            f.write("%f\t%f\n" % (p.lat, p.lng))
constant[save fence points to a file]
variable[f] assign[=] call[name[open], parameter[name[filename]]]
for taget[name[p]] in starred[name[self].points] begin[:]
call[name[f].write, parameter[binary_operation[constant[%f %f
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18dc04a30>, <ast.Attribute object at 0x7da18dc04850>]]]]]
call[name[f].close, parameter[]] | keyword[def] identifier[save] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[f] = identifier[open] ( identifier[filename] , identifier[mode] = literal[string] )
keyword[for] identifier[p] keyword[in] identifier[self] . identifier[points] :
identifier[f] . identifier[write] ( literal[string] %( identifier[p] . identifier[lat] , identifier[p] . identifier[lng] ))
identifier[f] . identifier[close] () | def save(self, filename):
"""save fence points to a file"""
f = open(filename, mode='w')
for p in self.points:
f.write('%f\t%f\n' % (p.lat, p.lng)) # depends on [control=['for'], data=['p']]
f.close() |
def column_list(string):
    """Validate and convert comma-separated list of column numbers."""
    try:
        columns = [int(piece) for piece in string.split(',')]
    except ValueError as err:
        # Re-raise the int() parse failure as an argparse-friendly error.
        raise argparse.ArgumentTypeError(*err.args)
    for number in columns:
        if number >= 1:
            continue
        raise argparse.ArgumentTypeError(
            'Invalid column {!r}: column numbers start at 1.'.format(number))
    return columns
constant[Validate and convert comma-separated list of column numbers.]
<ast.Try object at 0x7da1b23d7580>
for taget[name[column]] in starred[name[columns]] begin[:]
if compare[name[column] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b23d70d0>
return[name[columns]] | keyword[def] identifier[column_list] ( identifier[string] ):
literal[string]
keyword[try] :
identifier[columns] = identifier[list] ( identifier[map] ( identifier[int] , identifier[string] . identifier[split] ( literal[string] )))
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] (* identifier[e] . identifier[args] )
keyword[for] identifier[column] keyword[in] identifier[columns] :
keyword[if] identifier[column] < literal[int] :
keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] (
literal[string]
. identifier[format] ( identifier[column] ))
keyword[return] identifier[columns] | def column_list(string):
"""Validate and convert comma-separated list of column numbers."""
try:
columns = list(map(int, string.split(','))) # depends on [control=['try'], data=[]]
except ValueError as e:
raise argparse.ArgumentTypeError(*e.args) # depends on [control=['except'], data=['e']]
for column in columns:
if column < 1:
raise argparse.ArgumentTypeError('Invalid column {!r}: column numbers start at 1.'.format(column)) # depends on [control=['if'], data=['column']] # depends on [control=['for'], data=['column']]
return columns |
def refresh_address_presence(self, address):
    """
    Update synthesized address presence state from cached user presence states.
    Triggers callback (if any) in case the state has changed.
    This method is only provided to cover an edge case in our use of the Matrix protocol and
    should **not** generally be used.
    """
    userids = self._address_to_userids[address]
    composite_presence = {self._fetch_user_presence(uid) for uid in userids}
    # UserPresence members are declared from most to least online, so the
    # first member present in the composite set wins.
    new_presence = next(
        (
            candidate
            for candidate in UserPresence.__members__.values()
            if candidate in composite_presence
        ),
        UserPresence.UNKNOWN,
    )
    new_address_reachability = USER_PRESENCE_TO_ADDRESS_REACHABILITY[new_presence]
    if new_address_reachability == self._address_to_reachability.get(address):
        # Cached address reachability matches new state, do nothing
        return
    log.debug(
        'Changing address presence state',
        current_user=self._user_id,
        address=to_normalized_address(address),
        prev_state=self._address_to_reachability.get(address),
        state=new_address_reachability,
    )
    self._address_to_reachability[address] = new_address_reachability
    self._address_reachability_changed_callback(address, new_address_reachability)
constant[
Update synthesized address presence state from cached user presence states.
Triggers callback (if any) in case the state has changed.
This method is only provided to cover an edge case in our use of the Matrix protocol and
should **not** generally be used.
]
variable[composite_presence] assign[=] <ast.SetComp object at 0x7da1b18a3ac0>
variable[new_presence] assign[=] name[UserPresence].UNKNOWN
for taget[name[presence]] in starred[call[name[UserPresence].__members__.values, parameter[]]] begin[:]
if compare[name[presence] in name[composite_presence]] begin[:]
variable[new_presence] assign[=] name[presence]
break
variable[new_address_reachability] assign[=] call[name[USER_PRESENCE_TO_ADDRESS_REACHABILITY]][name[new_presence]]
if compare[name[new_address_reachability] equal[==] call[name[self]._address_to_reachability.get, parameter[name[address]]]] begin[:]
return[None]
call[name[log].debug, parameter[constant[Changing address presence state]]]
call[name[self]._address_to_reachability][name[address]] assign[=] name[new_address_reachability]
call[name[self]._address_reachability_changed_callback, parameter[name[address], name[new_address_reachability]]] | keyword[def] identifier[refresh_address_presence] ( identifier[self] , identifier[address] ):
literal[string]
identifier[composite_presence] ={
identifier[self] . identifier[_fetch_user_presence] ( identifier[uid] )
keyword[for] identifier[uid]
keyword[in] identifier[self] . identifier[_address_to_userids] [ identifier[address] ]
}
identifier[new_presence] = identifier[UserPresence] . identifier[UNKNOWN]
keyword[for] identifier[presence] keyword[in] identifier[UserPresence] . identifier[__members__] . identifier[values] ():
keyword[if] identifier[presence] keyword[in] identifier[composite_presence] :
identifier[new_presence] = identifier[presence]
keyword[break]
identifier[new_address_reachability] = identifier[USER_PRESENCE_TO_ADDRESS_REACHABILITY] [ identifier[new_presence] ]
keyword[if] identifier[new_address_reachability] == identifier[self] . identifier[_address_to_reachability] . identifier[get] ( identifier[address] ):
keyword[return]
identifier[log] . identifier[debug] (
literal[string] ,
identifier[current_user] = identifier[self] . identifier[_user_id] ,
identifier[address] = identifier[to_normalized_address] ( identifier[address] ),
identifier[prev_state] = identifier[self] . identifier[_address_to_reachability] . identifier[get] ( identifier[address] ),
identifier[state] = identifier[new_address_reachability] ,
)
identifier[self] . identifier[_address_to_reachability] [ identifier[address] ]= identifier[new_address_reachability]
identifier[self] . identifier[_address_reachability_changed_callback] ( identifier[address] , identifier[new_address_reachability] ) | def refresh_address_presence(self, address):
"""
Update synthesized address presence state from cached user presence states.
Triggers callback (if any) in case the state has changed.
This method is only provided to cover an edge case in our use of the Matrix protocol and
should **not** generally be used.
"""
composite_presence = {self._fetch_user_presence(uid) for uid in self._address_to_userids[address]}
# Iterate over UserPresence in definition order (most to least online) and pick
# first matching state
new_presence = UserPresence.UNKNOWN
for presence in UserPresence.__members__.values():
if presence in composite_presence:
new_presence = presence
break # depends on [control=['if'], data=['presence']] # depends on [control=['for'], data=['presence']]
new_address_reachability = USER_PRESENCE_TO_ADDRESS_REACHABILITY[new_presence]
if new_address_reachability == self._address_to_reachability.get(address):
# Cached address reachability matches new state, do nothing
return # depends on [control=['if'], data=[]]
log.debug('Changing address presence state', current_user=self._user_id, address=to_normalized_address(address), prev_state=self._address_to_reachability.get(address), state=new_address_reachability)
self._address_to_reachability[address] = new_address_reachability
self._address_reachability_changed_callback(address, new_address_reachability) |
def create_user(post_data):
    '''
    Create the user.

    When ``success`` is False, ``code`` explains the failure:
    11: stands for invalid username.
    21: stands for invalid E-mail.
    91: stands for unknown reason (e.g. database failure).

    :param post_data: dict with ``user_name``, ``user_pass`` and
        ``user_email`` keys.
    :return: dict with ``success`` (bool) and ``code`` (str) keys.
    '''
    out_dic = {'success': False, 'code': '00'}
    if not tools.check_username_valid(post_data['user_name']):
        out_dic['code'] = '11'
        return out_dic
    if not tools.check_email_valid(post_data['user_email']):
        out_dic['code'] = '21'
        return out_dic
    try:
        TabMember.create(uid=tools.get_uuid(),
                         user_name=post_data['user_name'],
                         user_pass=tools.md5(post_data['user_pass']),
                         user_email=post_data['user_email'],
                         role='1000',  # '1000' as default role.
                         time_create=tools.timestamp(),
                         time_update=tools.timestamp(),
                         time_reset_passwd=tools.timestamp(),
                         time_login=tools.timestamp(),
                         time_email=tools.timestamp())
        out_dic['success'] = True
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any DB/creation failure still maps to code 91.
        out_dic['code'] = '91'
    return out_dic
constant[
Create the user.
The code used if `False`.
11: standsfor invalid username.
21: standsfor invalide E-mail.
91: standsfor unkown reson.
]
variable[out_dic] assign[=] dictionary[[<ast.Constant object at 0x7da1b0466290>, <ast.Constant object at 0x7da1b0467190>], [<ast.Constant object at 0x7da1b0467ca0>, <ast.Constant object at 0x7da1b04640a0>]]
if <ast.UnaryOp object at 0x7da1b0464fa0> begin[:]
call[name[out_dic]][constant[code]] assign[=] constant[11]
return[name[out_dic]]
if <ast.UnaryOp object at 0x7da1b0467070> begin[:]
call[name[out_dic]][constant[code]] assign[=] constant[21]
return[name[out_dic]]
<ast.Try object at 0x7da1b04645b0>
return[name[out_dic]] | keyword[def] identifier[create_user] ( identifier[post_data] ):
literal[string]
identifier[out_dic] ={ literal[string] : keyword[False] , literal[string] : literal[string] }
keyword[if] keyword[not] identifier[tools] . identifier[check_username_valid] ( identifier[post_data] [ literal[string] ]):
identifier[out_dic] [ literal[string] ]= literal[string]
keyword[return] identifier[out_dic]
keyword[if] keyword[not] identifier[tools] . identifier[check_email_valid] ( identifier[post_data] [ literal[string] ]):
identifier[out_dic] [ literal[string] ]= literal[string]
keyword[return] identifier[out_dic]
keyword[try] :
identifier[TabMember] . identifier[create] ( identifier[uid] = identifier[tools] . identifier[get_uuid] (),
identifier[user_name] = identifier[post_data] [ literal[string] ],
identifier[user_pass] = identifier[tools] . identifier[md5] ( identifier[post_data] [ literal[string] ]),
identifier[user_email] = identifier[post_data] [ literal[string] ],
identifier[role] = literal[string] ,
identifier[time_create] = identifier[tools] . identifier[timestamp] (),
identifier[time_update] = identifier[tools] . identifier[timestamp] (),
identifier[time_reset_passwd] = identifier[tools] . identifier[timestamp] (),
identifier[time_login] = identifier[tools] . identifier[timestamp] (),
identifier[time_email] = identifier[tools] . identifier[timestamp] ())
identifier[out_dic] [ literal[string] ]= keyword[True]
keyword[except] :
identifier[out_dic] [ literal[string] ]= literal[string]
keyword[return] identifier[out_dic] | def create_user(post_data):
"""
Create the user.
The code used if `False`.
11: standsfor invalid username.
21: standsfor invalide E-mail.
91: standsfor unkown reson.
"""
out_dic = {'success': False, 'code': '00'}
if not tools.check_username_valid(post_data['user_name']):
out_dic['code'] = '11'
return out_dic # depends on [control=['if'], data=[]]
if not tools.check_email_valid(post_data['user_email']):
out_dic['code'] = '21'
return out_dic # depends on [control=['if'], data=[]]
try: # ‘1000' as default role.
TabMember.create(uid=tools.get_uuid(), user_name=post_data['user_name'], user_pass=tools.md5(post_data['user_pass']), user_email=post_data['user_email'], role='1000', time_create=tools.timestamp(), time_update=tools.timestamp(), time_reset_passwd=tools.timestamp(), time_login=tools.timestamp(), time_email=tools.timestamp())
out_dic['success'] = True # depends on [control=['try'], data=[]]
except:
out_dic['code'] = '91' # depends on [control=['except'], data=[]]
return out_dic |
def iplot_histogram(data, figsize=None, number_to_keep=None,
                    sort='asc', legend=None):
    """ Create a histogram representation.
    Graphical representation of the input array using a vertical bars
    style graph.
    Args:
        data (list or dict): This is either a list of dicts or a single
            dict containing the values to represent (ex. {'001' : 130})
        figsize (tuple): Figure size in pixels.
        number_to_keep (int): The number of terms to plot and
            rest is made into a single bar called other values
        sort (string): Could be 'asc' or 'desc'
        legend (list): A list of strings to use for labels of the data.
            The number of entries must match the length of data.
    Raises:
        VisualizationError: When legend is provided and the length doesn't
            match the input data.
    """
    # HTML
    html_template = Template("""
    <p>
        <div id="histogram_$divNumber"></div>
    </p>
    """)
    # JavaScript
    javascript_template = Template("""
    <script>
        requirejs.config({
            paths: {
                qVisualization: "https://qvisualization.mybluemix.net/q-visualizations"
            }
        });
        require(["qVisualization"], function(qVisualizations) {
            qVisualizations.plotState("histogram_$divNumber",
                                      "histogram",
                                      $executions,
                                      $options);
        });
    </script>
    """)
    # Unique div id derived from the current timestamp (dots stripped) so
    # multiple plots can coexist in one notebook.
    div_number = str(time.time())
    div_number = re.sub('[.]', '', div_number)
    # set default figure size if none provided
    if figsize is None:
        figsize = (7, 5)
    # number_to_keep=0 signals "keep everything" to the front-end widget.
    options = {'number_to_keep': 0 if number_to_keep is None else number_to_keep,
               'sort': sort,
               'show_legend': 0,
               'width': int(figsize[0]),
               'height': int(figsize[1])}
    if legend:
        options['show_legend'] = 1
    data_to_plot = []
    # Normalize a single execution dict to the list-of-executions shape.
    if isinstance(data, dict):
        data = [data]
    if legend and len(legend) != len(data):
        # Fixed typo in the error message ("legendL" -> "legend").
        raise VisualizationError("Length of legend (%s) doesn't match number "
                                 "of input executions: %s" %
                                 (len(legend), len(data)))
    for item, execution in enumerate(data):
        exec_data = process_data(execution, options['number_to_keep'])
        out_dict = {'data': exec_data}
        if legend:
            out_dict['name'] = legend[item]
        data_to_plot.append(out_dict)
    html = html_template.substitute({
        'divNumber': div_number
    })
    javascript = javascript_template.substitute({
        'divNumber': div_number,
        'executions': data_to_plot,
        'options': options
    })
    display(HTML(html + javascript))
constant[ Create a histogram representation.
Graphical representation of the input array using a vertical bars
style graph.
Args:
data (list or dict): This is either a list of dicts or a single
dict containing the values to represent (ex. {'001' : 130})
figsize (tuple): Figure size in pixels.
number_to_keep (int): The number of terms to plot and
rest is made into a single bar called other values
sort (string): Could be 'asc' or 'desc'
legend (list): A list of strings to use for labels of the data.
The number of entries must match the length of data.
Raises:
VisualizationError: When legend is provided and the length doesn't
match the input data.
]
variable[html_template] assign[=] call[name[Template], parameter[constant[
<p>
<div id="histogram_$divNumber"></div>
</p>
]]]
variable[javascript_template] assign[=] call[name[Template], parameter[constant[
<script>
requirejs.config({
paths: {
qVisualization: "https://qvisualization.mybluemix.net/q-visualizations"
}
});
require(["qVisualization"], function(qVisualizations) {
qVisualizations.plotState("histogram_$divNumber",
"histogram",
$executions,
$options);
});
</script>
]]]
variable[div_number] assign[=] call[name[str], parameter[call[name[time].time, parameter[]]]]
variable[div_number] assign[=] call[name[re].sub, parameter[constant[[.]], constant[], name[div_number]]]
if compare[name[figsize] is constant[None]] begin[:]
variable[figsize] assign[=] tuple[[<ast.Constant object at 0x7da20c6c4e50>, <ast.Constant object at 0x7da20c6c45e0>]]
variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c6bf0>, <ast.Constant object at 0x7da20c6c4b20>, <ast.Constant object at 0x7da20c6c4790>, <ast.Constant object at 0x7da20c6c5210>, <ast.Constant object at 0x7da20c6c5480>], [<ast.IfExp object at 0x7da20c6c5bd0>, <ast.Name object at 0x7da20c6c6560>, <ast.Constant object at 0x7da20c6c7f10>, <ast.Call object at 0x7da20c6c4640>, <ast.Call object at 0x7da20c6c6ec0>]]
if name[legend] begin[:]
call[name[options]][constant[show_legend]] assign[=] constant[1]
variable[data_to_plot] assign[=] list[[]]
if call[name[isinstance], parameter[name[data], name[dict]]] begin[:]
variable[data] assign[=] list[[<ast.Name object at 0x7da207f99600>]]
if <ast.BoolOp object at 0x7da207f9ac20> begin[:]
<ast.Raise object at 0x7da207f9b8e0>
for taget[tuple[[<ast.Name object at 0x7da207f987f0>, <ast.Name object at 0x7da207f99cf0>]]] in starred[call[name[enumerate], parameter[name[data]]]] begin[:]
variable[exec_data] assign[=] call[name[process_data], parameter[name[execution], call[name[options]][constant[number_to_keep]]]]
variable[out_dict] assign[=] dictionary[[<ast.Constant object at 0x7da2047ea560>], [<ast.Name object at 0x7da2047e8550>]]
if name[legend] begin[:]
call[name[out_dict]][constant[name]] assign[=] call[name[legend]][name[item]]
call[name[data_to_plot].append, parameter[name[out_dict]]]
variable[html] assign[=] call[name[html_template].substitute, parameter[dictionary[[<ast.Constant object at 0x7da2047eb190>], [<ast.Name object at 0x7da2047eabf0>]]]]
variable[javascript] assign[=] call[name[javascript_template].substitute, parameter[dictionary[[<ast.Constant object at 0x7da2047ebbb0>, <ast.Constant object at 0x7da2047e8850>, <ast.Constant object at 0x7da2047ebaf0>], [<ast.Name object at 0x7da2047ea8c0>, <ast.Name object at 0x7da2047e8400>, <ast.Name object at 0x7da2047e89a0>]]]]
call[name[display], parameter[call[name[HTML], parameter[binary_operation[name[html] + name[javascript]]]]]] | keyword[def] identifier[iplot_histogram] ( identifier[data] , identifier[figsize] = keyword[None] , identifier[number_to_keep] = keyword[None] ,
identifier[sort] = literal[string] , identifier[legend] = keyword[None] ):
literal[string]
identifier[html_template] = identifier[Template] ( literal[string] )
identifier[javascript_template] = identifier[Template] ( literal[string] )
identifier[div_number] = identifier[str] ( identifier[time] . identifier[time] ())
identifier[div_number] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[div_number] )
keyword[if] identifier[figsize] keyword[is] keyword[None] :
identifier[figsize] =( literal[int] , literal[int] )
identifier[options] ={ literal[string] : literal[int] keyword[if] identifier[number_to_keep] keyword[is] keyword[None] keyword[else] identifier[number_to_keep] ,
literal[string] : identifier[sort] ,
literal[string] : literal[int] ,
literal[string] : identifier[int] ( identifier[figsize] [ literal[int] ]),
literal[string] : identifier[int] ( identifier[figsize] [ literal[int] ])}
keyword[if] identifier[legend] :
identifier[options] [ literal[string] ]= literal[int]
identifier[data_to_plot] =[]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
identifier[data] =[ identifier[data] ]
keyword[if] identifier[legend] keyword[and] identifier[len] ( identifier[legend] )!= identifier[len] ( identifier[data] ):
keyword[raise] identifier[VisualizationError] ( literal[string]
literal[string] %
( identifier[len] ( identifier[legend] ), identifier[len] ( identifier[data] )))
keyword[for] identifier[item] , identifier[execution] keyword[in] identifier[enumerate] ( identifier[data] ):
identifier[exec_data] = identifier[process_data] ( identifier[execution] , identifier[options] [ literal[string] ])
identifier[out_dict] ={ literal[string] : identifier[exec_data] }
keyword[if] identifier[legend] :
identifier[out_dict] [ literal[string] ]= identifier[legend] [ identifier[item] ]
identifier[data_to_plot] . identifier[append] ( identifier[out_dict] )
identifier[html] = identifier[html_template] . identifier[substitute] ({
literal[string] : identifier[div_number]
})
identifier[javascript] = identifier[javascript_template] . identifier[substitute] ({
literal[string] : identifier[div_number] ,
literal[string] : identifier[data_to_plot] ,
literal[string] : identifier[options]
})
identifier[display] ( identifier[HTML] ( identifier[html] + identifier[javascript] )) | def iplot_histogram(data, figsize=None, number_to_keep=None, sort='asc', legend=None):
""" Create a histogram representation.
Graphical representation of the input array using a vertical bars
style graph.
Args:
data (list or dict): This is either a list of dicts or a single
dict containing the values to represent (ex. {'001' : 130})
figsize (tuple): Figure size in pixels.
number_to_keep (int): The number of terms to plot and
rest is made into a single bar called other values
sort (string): Could be 'asc' or 'desc'
legend (list): A list of strings to use for labels of the data.
The number of entries must match the length of data.
Raises:
VisualizationError: When legend is provided and the length doesn't
match the input data.
"""
# HTML
html_template = Template('\n <p>\n <div id="histogram_$divNumber"></div>\n </p>\n ')
# JavaScript
javascript_template = Template('\n <script>\n requirejs.config({\n paths: {\n qVisualization: "https://qvisualization.mybluemix.net/q-visualizations"\n }\n });\n\n require(["qVisualization"], function(qVisualizations) {\n qVisualizations.plotState("histogram_$divNumber",\n "histogram",\n $executions,\n $options);\n });\n </script>\n ')
# Process data and execute
div_number = str(time.time())
div_number = re.sub('[.]', '', div_number)
# set default figure size if none provided
if figsize is None:
figsize = (7, 5) # depends on [control=['if'], data=['figsize']]
options = {'number_to_keep': 0 if number_to_keep is None else number_to_keep, 'sort': sort, 'show_legend': 0, 'width': int(figsize[0]), 'height': int(figsize[1])}
if legend:
options['show_legend'] = 1 # depends on [control=['if'], data=[]]
data_to_plot = []
if isinstance(data, dict):
data = [data] # depends on [control=['if'], data=[]]
if legend and len(legend) != len(data):
raise VisualizationError("Length of legendL (%s) doesn't match number of input executions: %s" % (len(legend), len(data))) # depends on [control=['if'], data=[]]
for (item, execution) in enumerate(data):
exec_data = process_data(execution, options['number_to_keep'])
out_dict = {'data': exec_data}
if legend:
out_dict['name'] = legend[item] # depends on [control=['if'], data=[]]
data_to_plot.append(out_dict) # depends on [control=['for'], data=[]]
html = html_template.substitute({'divNumber': div_number})
javascript = javascript_template.substitute({'divNumber': div_number, 'executions': data_to_plot, 'options': options})
display(HTML(html + javascript)) |
def htmlDocContentDumpOutput(self, cur, encoding):
    """Dump an HTML document; formatting returns/spaces are added."""
    # Unwrap the optional node wrapper to its underlying C object.
    cur__o = None if cur is None else cur._o
    libxml2mod.htmlDocContentDumpOutput(self._o, cur__o, encoding)
constant[Dump an HTML document. Formating return/spaces are added. ]
if compare[name[cur] is constant[None]] begin[:]
variable[cur__o] assign[=] constant[None]
call[name[libxml2mod].htmlDocContentDumpOutput, parameter[name[self]._o, name[cur__o], name[encoding]]] | keyword[def] identifier[htmlDocContentDumpOutput] ( identifier[self] , identifier[cur] , identifier[encoding] ):
literal[string]
keyword[if] identifier[cur] keyword[is] keyword[None] : identifier[cur__o] = keyword[None]
keyword[else] : identifier[cur__o] = identifier[cur] . identifier[_o]
identifier[libxml2mod] . identifier[htmlDocContentDumpOutput] ( identifier[self] . identifier[_o] , identifier[cur__o] , identifier[encoding] ) | def htmlDocContentDumpOutput(self, cur, encoding):
"""Dump an HTML document. Formating return/spaces are added. """
if cur is None:
cur__o = None # depends on [control=['if'], data=[]]
else:
cur__o = cur._o
libxml2mod.htmlDocContentDumpOutput(self._o, cur__o, encoding) |
def compute_angle_weights_1d(angles):
    """
    Compute a weight for each angle from the angular distance between its
    neighbors.

    Parameters
    ----------
    angles: 1d ndarray of length A
        Angles in radians

    Returns
    -------
    weights: 1d ndarray of length A
        The weights for each angle, in the same order as the input;
        the weights always sum to A.

    Notes
    -----
    The angles are taken modulo PI (not 2*PI).  This reduces artifacts
    when the angular coverage is between PI and 2*PI, and does not change
    the result when the angles cover the full 2*PI interval.
    """
    # Shift the smallest angle to zero and wrap everything into [0, pi).
    flat = angles.flatten()
    wrapped = np.mod(flat - flat.min(), np.pi)
    # Sort so that neighbors are adjacent.
    order = np.argsort(wrapped)
    in_order = wrapped[order]
    # Cyclic gap spanned by each angle's two neighbors (next minus previous).
    gaps = np.mod(np.roll(in_order, -1) - np.roll(in_order, 1), np.pi)
    normalized = gaps / gaps.sum() * gaps.size
    # Scatter the weights back to the original (unsorted) positions.
    weights = np.zeros_like(normalized)
    weights[order] = normalized
    return weights
constant[
Compute the weight for each angle according to the distance between its
neighbors.
Parameters
----------
angles: 1d ndarray of length A
Angles in radians
Returns
-------
weights: 1d ndarray of length A
The weights for each angle
Notes
-----
To compute the weights, the angles are set modulo PI, not modulo 2PI.
This reduces artifacts when the angular coverage is between PI and 2PI
but does not affect the result when the angles cover the full 2PI interval.
]
variable[angles] assign[=] binary_operation[binary_operation[call[name[angles].flatten, parameter[]] - call[name[angles].min, parameter[]]] <ast.Mod object at 0x7da2590d6920> name[np].pi]
variable[sortargs] assign[=] call[name[np].argsort, parameter[name[angles]]]
variable[sortangl] assign[=] call[name[angles]][name[sortargs]]
variable[da] assign[=] binary_operation[binary_operation[call[name[np].roll, parameter[name[sortangl], <ast.UnaryOp object at 0x7da20e957460>]] - call[name[np].roll, parameter[name[sortangl], constant[1]]]] <ast.Mod object at 0x7da2590d6920> name[np].pi]
variable[weights] assign[=] binary_operation[binary_operation[name[da] / call[name[np].sum, parameter[name[da]]]] * call[name[da].shape][constant[0]]]
variable[unsortweights] assign[=] call[name[np].zeros_like, parameter[name[weights]]]
call[name[unsortweights]][name[sortargs]] assign[=] name[weights]
return[name[unsortweights]] | keyword[def] identifier[compute_angle_weights_1d] ( identifier[angles] ):
literal[string]
identifier[angles] =( identifier[angles] . identifier[flatten] ()- identifier[angles] . identifier[min] ())%( identifier[np] . identifier[pi] )
identifier[sortargs] = identifier[np] . identifier[argsort] ( identifier[angles] )
identifier[sortangl] = identifier[angles] [ identifier[sortargs] ]
identifier[da] =( identifier[np] . identifier[roll] ( identifier[sortangl] ,- literal[int] )- identifier[np] . identifier[roll] ( identifier[sortangl] , literal[int] ))%( identifier[np] . identifier[pi] )
identifier[weights] = identifier[da] / identifier[np] . identifier[sum] ( identifier[da] )* identifier[da] . identifier[shape] [ literal[int] ]
identifier[unsortweights] = identifier[np] . identifier[zeros_like] ( identifier[weights] )
identifier[unsortweights] [ identifier[sortargs] ]= identifier[weights]
keyword[return] identifier[unsortweights] | def compute_angle_weights_1d(angles):
"""
Compute the weight for each angle according to the distance between its
neighbors.
Parameters
----------
angles: 1d ndarray of length A
Angles in radians
Returns
-------
weights: 1d ndarray of length A
The weights for each angle
Notes
-----
To compute the weights, the angles are set modulo PI, not modulo 2PI.
This reduces artifacts when the angular coverage is between PI and 2PI
but does not affect the result when the angles cover the full 2PI interval.
"""
# copy and modulo np.pi
# This is an array with values in [0, np.pi)
angles = (angles.flatten() - angles.min()) % np.pi
# sort the array
sortargs = np.argsort(angles)
sortangl = angles[sortargs]
# compute weights for sorted angles
da = (np.roll(sortangl, -1) - np.roll(sortangl, 1)) % np.pi
weights = da / np.sum(da) * da.shape[0]
unsortweights = np.zeros_like(weights)
# Sort everything back where it belongs
unsortweights[sortargs] = weights
return unsortweights |
def add_tasks_r(addon_module, package_module, package_name):
    '''Recursively iterate through 'package_module' and add every fabric task
    to the 'addon_module' keeping the task hierarchy.

    Args:
        addon_module(types.ModuleType)
        package_module(types.ModuleType)
        package_name(str): Required, to avoid redundant addition of tasks

    Return: None
    '''
    # NOTE: the local names attr_name/attr_val are referenced by name inside
    # the flo() template string below, so they must keep these exact names.
    for attr_name, attr_val in package_module.__dict__.items():
        if isinstance(attr_val, fabric.tasks.WrappedCallableTask):
            # A plain fabric task: attach it directly to the addon module.
            addon_module.__dict__[attr_name] = attr_val
            continue
        looks_like_sub_package = (
            attr_name != package_name
            and isinstance(attr_val, types.ModuleType)
            and attr_val.__name__.startswith('fabsetup_')
            and attr_name.split('.')[-1] != package_name)
        if looks_like_sub_package:
            # Mirror the sub-package as a submodule and recurse into it.
            submodule_name = flo('{addon_module.__name__}.{attr_name}')
            submodule = get_or_create_module_r(submodule_name)
            add_tasks_r(submodule, attr_val, package_name)
            addon_module.__dict__[attr_name] = submodule
constant[Recursively iterate through 'package_module' and add every fabric task
to the 'addon_module' keeping the task hierarchy.
Args:
addon_module(types.ModuleType)
package_module(types.ModuleType)
package_name(str): Required, to avoid redundant addition of tasks
Return: None
]
variable[module_dict] assign[=] name[package_module].__dict__
for taget[tuple[[<ast.Name object at 0x7da1b2566260>, <ast.Name object at 0x7da1b2564d60>]]] in starred[call[name[module_dict].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[attr_val], name[fabric].tasks.WrappedCallableTask]] begin[:]
call[name[addon_module].__dict__][name[attr_name]] assign[=] name[attr_val] | keyword[def] identifier[add_tasks_r] ( identifier[addon_module] , identifier[package_module] , identifier[package_name] ):
literal[string]
identifier[module_dict] = identifier[package_module] . identifier[__dict__]
keyword[for] identifier[attr_name] , identifier[attr_val] keyword[in] identifier[module_dict] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[attr_val] , identifier[fabric] . identifier[tasks] . identifier[WrappedCallableTask] ):
identifier[addon_module] . identifier[__dict__] [ identifier[attr_name] ]= identifier[attr_val]
keyword[elif] identifier[attr_name] != identifier[package_name] keyword[and] identifier[isinstance] ( identifier[attr_val] , identifier[types] . identifier[ModuleType] ) keyword[and] identifier[attr_val] . identifier[__name__] . identifier[startswith] ( literal[string] ) keyword[and] identifier[attr_name] . identifier[split] ( literal[string] )[- literal[int] ]!= identifier[package_name] :
identifier[submodule_name] = identifier[flo] ( literal[string] )
identifier[submodule] = identifier[get_or_create_module_r] ( identifier[submodule_name] )
identifier[package_module] = identifier[attr_val]
identifier[add_tasks_r] ( identifier[submodule] , identifier[package_module] , identifier[package_name] )
identifier[addon_module] . identifier[__dict__] [ identifier[attr_name] ]= identifier[submodule] | def add_tasks_r(addon_module, package_module, package_name):
"""Recursively iterate through 'package_module' and add every fabric task
to the 'addon_module' keeping the task hierarchy.
Args:
addon_module(types.ModuleType)
package_module(types.ModuleType)
package_name(str): Required, to avoid redundant addition of tasks
Return: None
"""
module_dict = package_module.__dict__
for (attr_name, attr_val) in module_dict.items():
if isinstance(attr_val, fabric.tasks.WrappedCallableTask):
addon_module.__dict__[attr_name] = attr_val # depends on [control=['if'], data=[]]
elif attr_name != package_name and isinstance(attr_val, types.ModuleType) and attr_val.__name__.startswith('fabsetup_') and (attr_name.split('.')[-1] != package_name):
submodule_name = flo('{addon_module.__name__}.{attr_name}')
submodule = get_or_create_module_r(submodule_name)
package_module = attr_val
add_tasks_r(submodule, package_module, package_name)
addon_module.__dict__[attr_name] = submodule # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def create_sqlite_connection_provider(db_uri):
    """Returns function that returns SQLite Connection objects.

    Args:
      db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".

    Returns:
      A function that returns a new PEP-249 DB Connection, which must be closed,
      each time it is called.

    Raises:
      ValueError: If db_uri is not a valid sqlite file URI.
    """
    parsed = urlparse.urlparse(db_uri)
    # Validate the URI before touching the filesystem.
    if parsed.scheme != 'sqlite':
        raise ValueError('Scheme is not sqlite: ' + db_uri)
    if parsed.netloc:
        raise ValueError('Can not connect to SQLite over network: ' + db_uri)
    if parsed.path == ':memory:':
        raise ValueError('Memory mode SQLite not supported: ' + db_uri)
    db_path = os.path.expanduser(parsed.path)
    connect_params = _get_connect_params(parsed.query)

    def _connection_factory():
        # TODO(@jart): Add thread-local pooling.
        return sqlite3.connect(db_path, **connect_params)

    return _connection_factory
constant[Returns function that returns SQLite Connection objects.
Args:
db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".
Returns:
A function that returns a new PEP-249 DB Connection, which must be closed,
each time it is called.
Raises:
ValueError: If db_uri is not a valid sqlite file URI.
]
variable[uri] assign[=] call[name[urlparse].urlparse, parameter[name[db_uri]]]
if compare[name[uri].scheme not_equal[!=] constant[sqlite]] begin[:]
<ast.Raise object at 0x7da1b2115ea0>
if name[uri].netloc begin[:]
<ast.Raise object at 0x7da1b2116890>
if compare[name[uri].path equal[==] constant[:memory:]] begin[:]
<ast.Raise object at 0x7da1b21168c0>
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[uri].path]]
variable[params] assign[=] call[name[_get_connect_params], parameter[name[uri].query]]
return[<ast.Lambda object at 0x7da1b21154b0>] | keyword[def] identifier[create_sqlite_connection_provider] ( identifier[db_uri] ):
literal[string]
identifier[uri] = identifier[urlparse] . identifier[urlparse] ( identifier[db_uri] )
keyword[if] identifier[uri] . identifier[scheme] != literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[db_uri] )
keyword[if] identifier[uri] . identifier[netloc] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[db_uri] )
keyword[if] identifier[uri] . identifier[path] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[db_uri] )
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[uri] . identifier[path] )
identifier[params] = identifier[_get_connect_params] ( identifier[uri] . identifier[query] )
keyword[return] keyword[lambda] : identifier[sqlite3] . identifier[connect] ( identifier[path] ,** identifier[params] ) | def create_sqlite_connection_provider(db_uri):
"""Returns function that returns SQLite Connection objects.
Args:
db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".
Returns:
A function that returns a new PEP-249 DB Connection, which must be closed,
each time it is called.
Raises:
ValueError: If db_uri is not a valid sqlite file URI.
"""
uri = urlparse.urlparse(db_uri)
if uri.scheme != 'sqlite':
raise ValueError('Scheme is not sqlite: ' + db_uri) # depends on [control=['if'], data=[]]
if uri.netloc:
raise ValueError('Can not connect to SQLite over network: ' + db_uri) # depends on [control=['if'], data=[]]
if uri.path == ':memory:':
raise ValueError('Memory mode SQLite not supported: ' + db_uri) # depends on [control=['if'], data=[]]
path = os.path.expanduser(uri.path)
params = _get_connect_params(uri.query)
# TODO(@jart): Add thread-local pooling.
return lambda : sqlite3.connect(path, **params) |
def wait(self, timeout=0):
    """Blocks until timeout (seconds) or forever

    :param timeout: time to wait, in seconds (0 or falsy means no timeout)
    :return: the decoded async value
    """
    started = time.time()
    # We return synchronously, so busy-poll until the request completes.
    while not self.is_done:
        if timeout and time.time() - started > timeout:
            raise CloudTimeoutError(
                "Timeout getting async value. Timeout: %d seconds" % timeout
            )
        time.sleep(0.1)
    # Any status code outside 2xx is surfaced to the caller as an exception.
    status_code, error_msg, payload = self.check_error()
    if not self._status_ok(status_code):
        raise CloudAsyncError("Async response for '%s' returned an error." % self.async_id,
                              reason=error_msg,
                              status=status_code)
    result = self.value
    if isinstance(result, six.binary_type):
        result = result.decode('utf-8')
    return result
constant[Blocks until timeout (seconds) or forever
:param timeout: time to wait, in seconds
:return:
]
variable[start_time] assign[=] call[name[time].time, parameter[]]
while <ast.UnaryOp object at 0x7da1b04d6d40> begin[:]
variable[duration] assign[=] binary_operation[call[name[time].time, parameter[]] - name[start_time]]
if <ast.BoolOp object at 0x7da1b04d4a60> begin[:]
<ast.Raise object at 0x7da1b04d74f0>
call[name[time].sleep, parameter[constant[0.1]]]
<ast.Tuple object at 0x7da1b04b6b30> assign[=] call[name[self].check_error, parameter[]]
if <ast.UnaryOp object at 0x7da1b040f880> begin[:]
<ast.Raise object at 0x7da1b040ef50>
variable[value] assign[=] name[self].value
if call[name[isinstance], parameter[name[value], name[six].binary_type]] begin[:]
variable[value] assign[=] call[name[value].decode, parameter[constant[utf-8]]]
return[name[value]] | keyword[def] identifier[wait] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
identifier[start_time] = identifier[time] . identifier[time] ()
keyword[while] keyword[not] identifier[self] . identifier[is_done] :
identifier[duration] = identifier[time] . identifier[time] ()- identifier[start_time]
keyword[if] identifier[timeout] keyword[and] identifier[duration] > identifier[timeout] :
keyword[raise] identifier[CloudTimeoutError] (
literal[string] % identifier[timeout]
)
identifier[time] . identifier[sleep] ( literal[int] )
identifier[status_code] , identifier[error_msg] , identifier[payload] = identifier[self] . identifier[check_error] ()
keyword[if] keyword[not] identifier[self] . identifier[_status_ok] ( identifier[status_code] ):
keyword[raise] identifier[CloudAsyncError] ( literal[string] % identifier[self] . identifier[async_id] ,
identifier[reason] = identifier[error_msg] ,
identifier[status] = identifier[status_code] )
identifier[value] = identifier[self] . identifier[value]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[binary_type] ):
identifier[value] = identifier[value] . identifier[decode] ( literal[string] )
keyword[return] identifier[value] | def wait(self, timeout=0):
"""Blocks until timeout (seconds) or forever
:param timeout: time to wait, in seconds
:return:
"""
start_time = time.time()
# We return synchronously, so we block in a busy loop waiting for the
# request to be done.
while not self.is_done:
duration = time.time() - start_time
if timeout and duration > timeout:
raise CloudTimeoutError('Timeout getting async value. Timeout: %d seconds' % timeout) # depends on [control=['if'], data=[]]
time.sleep(0.1) # depends on [control=['while'], data=[]]
# If we get an any status code other than a 2xx we raise an exception to the user, which can then be handled
# accordingly.
(status_code, error_msg, payload) = self.check_error()
if not self._status_ok(status_code):
raise CloudAsyncError("Async response for '%s' returned an error." % self.async_id, reason=error_msg, status=status_code) # depends on [control=['if'], data=[]]
value = self.value
if isinstance(value, six.binary_type):
value = value.decode('utf-8') # depends on [control=['if'], data=[]]
return value |
def get_ref_favorites(self, project, repository_id=None, identity_id=None):
    """GetRefFavorites.
    [Preview API] Gets the refs favorites for a repo and an identity.
    :param str project: Project ID or project name
    :param str repository_id: The id of the repository.
    :param str identity_id: The id of the identity whose favorites are to be retrieved. If null, the requesting identity is used.
    :rtype: [GitRefFavorite]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    # Serialize only the optional query parameters that were supplied.
    query_parameters = {}
    for wire_name, param_name, value in (
            ('repositoryId', 'repository_id', repository_id),
            ('identityId', 'identity_id', identity_id)):
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(param_name, value, 'str')
    response = self._send(http_method='GET',
                          location_id='876f70af-5792-485a-a1c7-d0a7b2f42bbb',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[GitRefFavorite]', self._unwrap_collection(response))
constant[GetRefFavorites.
[Preview API] Gets the refs favorites for a repo and an identity.
:param str project: Project ID or project name
:param str repository_id: The id of the repository.
:param str identity_id: The id of the identity whose favorites are to be retrieved. If null, the requesting identity is used.
:rtype: [GitRefFavorite]
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[repository_id] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[repositoryId]] assign[=] call[name[self]._serialize.query, parameter[constant[repository_id], name[repository_id], constant[str]]]
if compare[name[identity_id] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[identityId]] assign[=] call[name[self]._serialize.query, parameter[constant[identity_id], name[identity_id], constant[str]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[[GitRefFavorite]], call[name[self]._unwrap_collection, parameter[name[response]]]]]] | keyword[def] identifier[get_ref_favorites] ( identifier[self] , identifier[project] , identifier[repository_id] = keyword[None] , identifier[identity_id] = keyword[None] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
identifier[query_parameters] ={}
keyword[if] identifier[repository_id] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[repository_id] , literal[string] )
keyword[if] identifier[identity_id] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[identity_id] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] )) | def get_ref_favorites(self, project, repository_id=None, identity_id=None):
"""GetRefFavorites.
[Preview API] Gets the refs favorites for a repo and an identity.
:param str project: Project ID or project name
:param str repository_id: The id of the repository.
:param str identity_id: The id of the identity whose favorites are to be retrieved. If null, the requesting identity is used.
:rtype: [GitRefFavorite]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']]
query_parameters = {}
if repository_id is not None:
query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str') # depends on [control=['if'], data=['repository_id']]
if identity_id is not None:
query_parameters['identityId'] = self._serialize.query('identity_id', identity_id, 'str') # depends on [control=['if'], data=['identity_id']]
response = self._send(http_method='GET', location_id='876f70af-5792-485a-a1c7-d0a7b2f42bbb', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters)
return self._deserialize('[GitRefFavorite]', self._unwrap_collection(response)) |
def as_string(self, chars, current_linkable=False, class_current="active_link"):
    """
    Render the menu and return it as a single string.
    """
    rendered = self.__do_menu("as_string", current_linkable, class_current, chars)
    return rendered
constant[
It returns menu as string
]
return[call[name[self].__do_menu, parameter[constant[as_string], name[current_linkable], name[class_current], name[chars]]]] | keyword[def] identifier[as_string] ( identifier[self] , identifier[chars] , identifier[current_linkable] = keyword[False] , identifier[class_current] = literal[string] ):
literal[string]
keyword[return] identifier[self] . identifier[__do_menu] ( literal[string] , identifier[current_linkable] , identifier[class_current] , identifier[chars] ) | def as_string(self, chars, current_linkable=False, class_current='active_link'):
"""
It returns menu as string
"""
return self.__do_menu('as_string', current_linkable, class_current, chars) |
def log_player_ends_turn(self, player):
    """
    :param player: catan.game.Player
    """
    # Seconds elapsed since the last logged timestamp, rounded for the log line.
    elapsed_s = (datetime.datetime.now() - self._latest_timestamp).total_seconds()
    self._logln('{0} ends turn after {1}s'.format(player.color, round(elapsed_s)))
    # Re-read the clock (as the original did) so the stored stamp is post-log.
    self._latest_timestamp = datetime.datetime.now()
constant[
:param player: catan.game.Player
]
variable[seconds_delta] assign[=] call[binary_operation[call[name[datetime].datetime.now, parameter[]] - name[self]._latest_timestamp].total_seconds, parameter[]]
call[name[self]._logln, parameter[call[constant[{0} ends turn after {1}s].format, parameter[name[player].color, call[name[round], parameter[name[seconds_delta]]]]]]]
name[self]._latest_timestamp assign[=] call[name[datetime].datetime.now, parameter[]] | keyword[def] identifier[log_player_ends_turn] ( identifier[self] , identifier[player] ):
literal[string]
identifier[seconds_delta] =( identifier[datetime] . identifier[datetime] . identifier[now] ()- identifier[self] . identifier[_latest_timestamp] ). identifier[total_seconds] ()
identifier[self] . identifier[_logln] ( literal[string] . identifier[format] ( identifier[player] . identifier[color] , identifier[round] ( identifier[seconds_delta] )))
identifier[self] . identifier[_latest_timestamp] = identifier[datetime] . identifier[datetime] . identifier[now] () | def log_player_ends_turn(self, player):
"""
:param player: catan.game.Player
"""
seconds_delta = (datetime.datetime.now() - self._latest_timestamp).total_seconds()
self._logln('{0} ends turn after {1}s'.format(player.color, round(seconds_delta)))
self._latest_timestamp = datetime.datetime.now() |
def register(linter):
    """required method to auto register this checker"""
    # Registration order is preserved from the original explicit calls.
    for checker_class in (BasicErrorChecker, BasicChecker, NameChecker,
                          DocStringChecker, PassChecker, ComparisonChecker):
        linter.register_checker(checker_class(linter))
constant[required method to auto register this checker]
call[name[linter].register_checker, parameter[call[name[BasicErrorChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[BasicChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[NameChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[DocStringChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[PassChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[ComparisonChecker], parameter[name[linter]]]]] | keyword[def] identifier[register] ( identifier[linter] ):
literal[string]
identifier[linter] . identifier[register_checker] ( identifier[BasicErrorChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[BasicChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[NameChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[DocStringChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[PassChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[ComparisonChecker] ( identifier[linter] )) | def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter)) |
def _console(console: Any) -> Any:
"""Return a cffi console."""
try:
return console.console_c
except AttributeError:
warnings.warn(
(
"Falsy console parameters are deprecated, "
"always use the root console instance returned by "
"console_init_root."
),
DeprecationWarning,
stacklevel=3,
)
return ffi.NULL | def function[_console, parameter[console]]:
constant[Return a cffi console.]
<ast.Try object at 0x7da1b117a8c0> | keyword[def] identifier[_console] ( identifier[console] : identifier[Any] )-> identifier[Any] :
literal[string]
keyword[try] :
keyword[return] identifier[console] . identifier[console_c]
keyword[except] identifier[AttributeError] :
identifier[warnings] . identifier[warn] (
(
literal[string]
literal[string]
literal[string]
),
identifier[DeprecationWarning] ,
identifier[stacklevel] = literal[int] ,
)
keyword[return] identifier[ffi] . identifier[NULL] | def _console(console: Any) -> Any:
"""Return a cffi console."""
try:
return console.console_c # depends on [control=['try'], data=[]]
except AttributeError:
warnings.warn('Falsy console parameters are deprecated, always use the root console instance returned by console_init_root.', DeprecationWarning, stacklevel=3)
return ffi.NULL # depends on [control=['except'], data=[]] |
def _caveat_v2_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v2 format.
'''
serialized = {}
if len(c.caveat_id_bytes) > 0:
_add_json_binary_field(c.caveat_id_bytes, serialized, 'i')
if c.verification_key_id:
_add_json_binary_field(c.verification_key_id, serialized, 'v')
if c.location:
serialized['l'] = c.location
return serialized | def function[_caveat_v2_to_dict, parameter[c]]:
constant[ Return a caveat as a dictionary for export as the JSON
macaroon v2 format.
]
variable[serialized] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[name[c].caveat_id_bytes]] greater[>] constant[0]] begin[:]
call[name[_add_json_binary_field], parameter[name[c].caveat_id_bytes, name[serialized], constant[i]]]
if name[c].verification_key_id begin[:]
call[name[_add_json_binary_field], parameter[name[c].verification_key_id, name[serialized], constant[v]]]
if name[c].location begin[:]
call[name[serialized]][constant[l]] assign[=] name[c].location
return[name[serialized]] | keyword[def] identifier[_caveat_v2_to_dict] ( identifier[c] ):
literal[string]
identifier[serialized] ={}
keyword[if] identifier[len] ( identifier[c] . identifier[caveat_id_bytes] )> literal[int] :
identifier[_add_json_binary_field] ( identifier[c] . identifier[caveat_id_bytes] , identifier[serialized] , literal[string] )
keyword[if] identifier[c] . identifier[verification_key_id] :
identifier[_add_json_binary_field] ( identifier[c] . identifier[verification_key_id] , identifier[serialized] , literal[string] )
keyword[if] identifier[c] . identifier[location] :
identifier[serialized] [ literal[string] ]= identifier[c] . identifier[location]
keyword[return] identifier[serialized] | def _caveat_v2_to_dict(c):
""" Return a caveat as a dictionary for export as the JSON
macaroon v2 format.
"""
serialized = {}
if len(c.caveat_id_bytes) > 0:
_add_json_binary_field(c.caveat_id_bytes, serialized, 'i') # depends on [control=['if'], data=[]]
if c.verification_key_id:
_add_json_binary_field(c.verification_key_id, serialized, 'v') # depends on [control=['if'], data=[]]
if c.location:
serialized['l'] = c.location # depends on [control=['if'], data=[]]
return serialized |
def get_unread_messages(self,
include_me=False,
include_notifications=False):
"""
I fetch unread messages.
:param include_me: if user's messages are to be included
:type include_me: bool
:param include_notifications: if events happening on chat are to be included
:type include_notifications: bool
:return: list of unread messages
:rtype: list
"""
return list(self.driver.get_unread_messages_in_chat(
self.id,
include_me,
include_notifications
)) | def function[get_unread_messages, parameter[self, include_me, include_notifications]]:
constant[
I fetch unread messages.
:param include_me: if user's messages are to be included
:type include_me: bool
:param include_notifications: if events happening on chat are to be included
:type include_notifications: bool
:return: list of unread messages
:rtype: list
]
return[call[name[list], parameter[call[name[self].driver.get_unread_messages_in_chat, parameter[name[self].id, name[include_me], name[include_notifications]]]]]] | keyword[def] identifier[get_unread_messages] ( identifier[self] ,
identifier[include_me] = keyword[False] ,
identifier[include_notifications] = keyword[False] ):
literal[string]
keyword[return] identifier[list] ( identifier[self] . identifier[driver] . identifier[get_unread_messages_in_chat] (
identifier[self] . identifier[id] ,
identifier[include_me] ,
identifier[include_notifications]
)) | def get_unread_messages(self, include_me=False, include_notifications=False):
"""
I fetch unread messages.
:param include_me: if user's messages are to be included
:type include_me: bool
:param include_notifications: if events happening on chat are to be included
:type include_notifications: bool
:return: list of unread messages
:rtype: list
"""
return list(self.driver.get_unread_messages_in_chat(self.id, include_me, include_notifications)) |
def purge_run(self, event):
    """Run purge for the object whose ``location_id`` is given in ``event``.

    Any exception raised while purging is logged rather than propagated.
    """
    loc_id = event['location_id']
    verbosity = event['verbosity']
    try:
        logger.info(__("Running purge for location id {}.", loc_id))
        location_purge(delete=True, location_id=loc_id, verbosity=verbosity)
    except Exception:  # pylint: disable=broad-except
        logger.exception("Error while purging location.", extra={'location_id': loc_id})
constant[Run purge for the object with ``location_id`` specified in ``event`` argument.]
variable[location_id] assign[=] call[name[event]][constant[location_id]]
variable[verbosity] assign[=] call[name[event]][constant[verbosity]]
<ast.Try object at 0x7da1b1ade0e0> | keyword[def] identifier[purge_run] ( identifier[self] , identifier[event] ):
literal[string]
identifier[location_id] = identifier[event] [ literal[string] ]
identifier[verbosity] = identifier[event] [ literal[string] ]
keyword[try] :
identifier[logger] . identifier[info] ( identifier[__] ( literal[string] , identifier[location_id] ))
identifier[location_purge] ( identifier[location_id] = identifier[location_id] , identifier[delete] = keyword[True] , identifier[verbosity] = identifier[verbosity] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] , identifier[extra] ={ literal[string] : identifier[location_id] }) | def purge_run(self, event):
"""Run purge for the object with ``location_id`` specified in ``event`` argument."""
location_id = event['location_id']
verbosity = event['verbosity']
try:
logger.info(__('Running purge for location id {}.', location_id))
location_purge(location_id=location_id, delete=True, verbosity=verbosity) # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=broad-except
logger.exception('Error while purging location.', extra={'location_id': location_id}) # depends on [control=['except'], data=[]] |
def altitude_diff(msg):
    """Decode the difference between GNSS and barometric altitude.

    Args:
        msg (string): 28 bytes hexadecimal message string, TC=19

    Returns:
        int: Altitude difference in ft. Negative value indicates GNSS
        altitude below barometric altitude. ``None`` when the field
        value (0 or 127) marks the information as unavailable.
    """
    # Only TC=19 (airborne velocity) messages carry this field.
    if common.typecode(msg) != 19:
        raise RuntimeError("%s: Not a airborne velocity message, expecting TC=19" % msg)
    bits = common.hex2bin(msg)
    # Bit 80 is the sign flag, bits 81-87 the 7-bit magnitude.
    sign = -1 if int(bits[80]) else 1
    raw = common.bin2int(bits[81:88])
    if raw in (0, 127):
        return None
    return sign * (raw - 1) * 25
constant[Decode the differece between GNSS and barometric altitude
Args:
msg (string): 28 bytes hexadecimal message string, TC=19
Returns:
int: Altitude difference in ft. Negative value indicates GNSS altitude
below barometric altitude.
]
variable[tc] assign[=] call[name[common].typecode, parameter[name[msg]]]
if compare[name[tc] not_equal[!=] constant[19]] begin[:]
<ast.Raise object at 0x7da18dc06c50>
variable[msgbin] assign[=] call[name[common].hex2bin, parameter[name[msg]]]
variable[sign] assign[=] <ast.IfExp object at 0x7da18dc041f0>
variable[value] assign[=] call[name[common].bin2int, parameter[call[name[msgbin]][<ast.Slice object at 0x7da18dc04370>]]]
if <ast.BoolOp object at 0x7da18dc07730> begin[:]
return[constant[None]] | keyword[def] identifier[altitude_diff] ( identifier[msg] ):
literal[string]
identifier[tc] = identifier[common] . identifier[typecode] ( identifier[msg] )
keyword[if] identifier[tc] != literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[msg] )
identifier[msgbin] = identifier[common] . identifier[hex2bin] ( identifier[msg] )
identifier[sign] =- literal[int] keyword[if] identifier[int] ( identifier[msgbin] [ literal[int] ]) keyword[else] literal[int]
identifier[value] = identifier[common] . identifier[bin2int] ( identifier[msgbin] [ literal[int] : literal[int] ])
keyword[if] identifier[value] == literal[int] keyword[or] identifier[value] == literal[int] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[sign] *( identifier[value] - literal[int] )* literal[int] | def altitude_diff(msg):
"""Decode the differece between GNSS and barometric altitude
Args:
msg (string): 28 bytes hexadecimal message string, TC=19
Returns:
int: Altitude difference in ft. Negative value indicates GNSS altitude
below barometric altitude.
"""
tc = common.typecode(msg)
if tc != 19:
raise RuntimeError('%s: Not a airborne velocity message, expecting TC=19' % msg) # depends on [control=['if'], data=[]]
msgbin = common.hex2bin(msg)
sign = -1 if int(msgbin[80]) else 1
value = common.bin2int(msgbin[81:88])
if value == 0 or value == 127:
return None # depends on [control=['if'], data=[]]
else:
return sign * (value - 1) * 25 |
def delete_acl(self, name):
"""Delete an acl."""
if name not in self._acl:
return False
del self._acl[name]
return True | def function[delete_acl, parameter[self, name]]:
constant[Delete an acl.]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._acl] begin[:]
return[constant[False]]
<ast.Delete object at 0x7da2045645e0>
return[constant[True]] | keyword[def] identifier[delete_acl] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_acl] :
keyword[return] keyword[False]
keyword[del] identifier[self] . identifier[_acl] [ identifier[name] ]
keyword[return] keyword[True] | def delete_acl(self, name):
"""Delete an acl."""
if name not in self._acl:
return False # depends on [control=['if'], data=[]]
del self._acl[name]
return True |
def find_closing_braces(self, query):
    """Return the index of the closing brace matching the opening brace
    at the start of ``query``.

    The first character of ``query`` must be '('; otherwise an
    exception is raised. An exception is also raised when the braces
    are unbalanced.
    """
    if query[0] != '(':
        raise Exception("Trying to find closing braces for no opening braces")
    depth = 0
    for index, ch in enumerate(query):
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        # Depth returns to zero exactly at the matching closing brace.
        if depth == 0:
            return index
    raise Exception("No closing braces found")
constant[Find the index of the closing braces for the opening braces
at the start of the query string. Note that first character
of input string must be an opening braces.]
if compare[call[name[query]][constant[0]] not_equal[!=] constant[(]] begin[:]
<ast.Raise object at 0x7da1b2344eb0>
variable[num_open_braces] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[query]]]]]] begin[:]
variable[c] assign[=] call[name[query]][name[i]]
if compare[name[c] equal[==] constant[(]] begin[:]
<ast.AugAssign object at 0x7da18fe90040>
if compare[name[num_open_braces] equal[==] constant[0]] begin[:]
return[name[i]]
<ast.Raise object at 0x7da18fe93b20> | keyword[def] identifier[find_closing_braces] ( identifier[self] , identifier[query] ):
literal[string]
keyword[if] identifier[query] [ literal[int] ]!= literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[num_open_braces] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[query] )):
identifier[c] = identifier[query] [ identifier[i] ]
keyword[if] identifier[c] == literal[string] :
identifier[num_open_braces] += literal[int]
keyword[elif] identifier[c] == literal[string] :
identifier[num_open_braces] -= literal[int]
keyword[if] identifier[num_open_braces] == literal[int] :
keyword[return] identifier[i]
keyword[raise] identifier[Exception] ( literal[string] ) | def find_closing_braces(self, query):
"""Find the index of the closing braces for the opening braces
at the start of the query string. Note that first character
of input string must be an opening braces."""
if query[0] != '(':
raise Exception('Trying to find closing braces for no opening braces') # depends on [control=['if'], data=[]]
num_open_braces = 0
for i in range(len(query)):
c = query[i]
if c == '(':
num_open_braces += 1 # depends on [control=['if'], data=[]]
elif c == ')':
num_open_braces -= 1 # depends on [control=['if'], data=[]]
if num_open_braces == 0:
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
raise Exception('No closing braces found') |
def proximal_box_constraint(space, lower=None, upper=None):
    r"""Proximal operator factory for ``G(x) = ind(a <= x <= b)``.
    If P is the set of elements with a <= x <= b, the indicator function of
    which is defined as::
        ind(a <= x <= b) = {0 if x in P, infinity if x is not in P}
    with x being an element in ``space``.
    Parameters
    ----------
    space : `LinearSpace`
        Domain of the functional G(x)
    lower : ``space.field`` element or ``space`` `element-like`, optional
        The lower bound.
        Default: ``None``, interpreted as -infinity
    upper : ``space.field`` element or ``space`` `element-like`, optional
        The upper bound.
        Default: ``None``, interpreted as +infinity
    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized
    Notes
    -----
    If :math:`P` is an interval :math:`[a,b]`, the indicator function is
    defined as
    .. math::
        I_{P}(x) = \begin{cases}
        0 & \text{if } x \in P, \\
        \infty & \text{if } x \not \in P
        \end{cases}
    For a step size :math:`\sigma`, the proximal operator of
    :math:`\sigma I_{P}` is given by the projection onto the interval
    .. math::
        \mathrm{prox}_{\sigma I_{P}}(x) = \begin{cases}
        a & \text{if } x < a, \\
        x & \text{if } x \in [a,b], \\
        b & \text{if } x > b.
        \end{cases}
    The proximal operator is independent of :math:`\sigma` and invariant under
    a positive rescaling of :math:`I_{P}(x)`, since that leaves the indicator
    function unchanged.
    For spaces of the form :math:`R^n`, the definition extends naturally
    in each component.
    See Also
    --------
    proximal_nonnegativity : Special case with ``lower=0, upper=infty``
    """
    # Convert element-likes if needed, also does some space checking.
    # Bounds that are neither space elements nor field scalars are wrapped
    # into space elements so the ufunc clamps below can broadcast them.
    if lower is not None and lower not in space and lower not in space.field:
        lower = space.element(lower)
    if upper is not None and upper not in space and upper not in space.field:
        upper = space.element(upper)
    # Scalar (field) bounds can be compared directly; element-wise bounds
    # are intentionally not validated component by component here.
    if lower in space.field and upper in space.field:
        if lower > upper:
            raise ValueError('invalid values, `lower` ({}) > `upper` ({})'
                             ''.format(lower, upper))
    class ProxOpBoxConstraint(Operator):
        """Proximal operator for G(x) = ind(a <= x <= b).

        The operator is the pointwise projection onto the box [a, b];
        it closes over ``space``, ``lower`` and ``upper`` from the
        enclosing factory.
        """
        def __init__(self, sigma):
            """Initialize a new instance.
            Parameters
            ----------
            sigma : positive float
                Step size parameter, not used.
            """
            super(ProxOpBoxConstraint, self).__init__(
                domain=space, range=space, linear=False)
        def _call(self, x, out):
            """Apply the operator to ``x`` and store the result in ``out``."""
            # Clamp from below with ``lower`` and/or from above with
            # ``upper``. With both bounds the max must run first so the
            # subsequent min acts on the already lower-clamped values.
            # With no bounds the projection is the identity.
            if lower is not None and upper is None:
                x.ufuncs.maximum(lower, out=out)
            elif lower is None and upper is not None:
                x.ufuncs.minimum(upper, out=out)
            elif lower is not None and upper is not None:
                x.ufuncs.maximum(lower, out=out)
                out.ufuncs.minimum(upper, out=out)
            else:
                out.assign(x)
    return ProxOpBoxConstraint
constant[Proximal operator factory for ``G(x) = ind(a <= x <= b)``.
If P is the set of elements with a <= x <= b, the indicator function of
which is defined as::
ind(a <= x <= b) = {0 if x in P, infinity if x is not in P}
with x being an element in ``space``.
Parameters
----------
space : `LinearSpace`
Domain of the functional G(x)
lower : ``space.field`` element or ``space`` `element-like`, optional
The lower bound.
Default: ``None``, interpreted as -infinity
upper : ``space.field`` element or ``space`` `element-like`, optional
The upper bound.
Default: ``None``, interpreted as +infinity
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized
Notes
-----
If :math:`P` is an interval :math:`[a,b]`, the indicator function is
defined as
.. math::
I_{P}(x) = \begin{cases}
0 & \text{if } x \in P, \\
\infty & \text{if } x \not \in P
\end{cases}
For a step size :math:`\sigma`, the proximal operator of
:math:`\sigma I_{P}` is given by the projection onto the interval
.. math::
\mathrm{prox}_{\sigma I_{P}}(x) = \begin{cases}
a & \text{if } x < a, \\
x & \text{if } x \in [a,b], \\
b & \text{if } x > b.
\end{cases}
The proximal operator is independent of :math:`\sigma` and invariant under
a positive rescaling of :math:`I_{P}(x)`, since that leaves the indicator
function unchanged.
For spaces of the form :math:`R^n`, the definition extends naturally
in each component.
See Also
--------
proximal_nonnegativity : Special case with ``lower=0, upper=infty``
]
if <ast.BoolOp object at 0x7da1b20f8b20> begin[:]
variable[lower] assign[=] call[name[space].element, parameter[name[lower]]]
if <ast.BoolOp object at 0x7da1b20fb220> begin[:]
variable[upper] assign[=] call[name[space].element, parameter[name[upper]]]
if <ast.BoolOp object at 0x7da1b20fbbe0> begin[:]
if compare[name[lower] greater[>] name[upper]] begin[:]
<ast.Raise object at 0x7da1b20fada0>
class class[ProxOpBoxConstraint, parameter[]] begin[:]
constant[Proximal operator for G(x) = ind(a <= x <= b).]
def function[__init__, parameter[self, sigma]]:
constant[Initialize a new instance.
Parameters
----------
sigma : positive float
Step size parameter, not used.
]
call[call[name[super], parameter[name[ProxOpBoxConstraint], name[self]]].__init__, parameter[]]
def function[_call, parameter[self, x, out]]:
constant[Apply the operator to ``x`` and store the result in ``out``.]
if <ast.BoolOp object at 0x7da1b20f9cc0> begin[:]
call[name[x].ufuncs.maximum, parameter[name[lower]]]
return[name[ProxOpBoxConstraint]] | keyword[def] identifier[proximal_box_constraint] ( identifier[space] , identifier[lower] = keyword[None] , identifier[upper] = keyword[None] ):
literal[string]
keyword[if] identifier[lower] keyword[is] keyword[not] keyword[None] keyword[and] identifier[lower] keyword[not] keyword[in] identifier[space] keyword[and] identifier[lower] keyword[not] keyword[in] identifier[space] . identifier[field] :
identifier[lower] = identifier[space] . identifier[element] ( identifier[lower] )
keyword[if] identifier[upper] keyword[is] keyword[not] keyword[None] keyword[and] identifier[upper] keyword[not] keyword[in] identifier[space] keyword[and] identifier[upper] keyword[not] keyword[in] identifier[space] . identifier[field] :
identifier[upper] = identifier[space] . identifier[element] ( identifier[upper] )
keyword[if] identifier[lower] keyword[in] identifier[space] . identifier[field] keyword[and] identifier[upper] keyword[in] identifier[space] . identifier[field] :
keyword[if] identifier[lower] > identifier[upper] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[lower] , identifier[upper] ))
keyword[class] identifier[ProxOpBoxConstraint] ( identifier[Operator] ):
literal[string]
keyword[def] identifier[__init__] ( identifier[self] , identifier[sigma] ):
literal[string]
identifier[super] ( identifier[ProxOpBoxConstraint] , identifier[self] ). identifier[__init__] (
identifier[domain] = identifier[space] , identifier[range] = identifier[space] , identifier[linear] = keyword[False] )
keyword[def] identifier[_call] ( identifier[self] , identifier[x] , identifier[out] ):
literal[string]
keyword[if] identifier[lower] keyword[is] keyword[not] keyword[None] keyword[and] identifier[upper] keyword[is] keyword[None] :
identifier[x] . identifier[ufuncs] . identifier[maximum] ( identifier[lower] , identifier[out] = identifier[out] )
keyword[elif] identifier[lower] keyword[is] keyword[None] keyword[and] identifier[upper] keyword[is] keyword[not] keyword[None] :
identifier[x] . identifier[ufuncs] . identifier[minimum] ( identifier[upper] , identifier[out] = identifier[out] )
keyword[elif] identifier[lower] keyword[is] keyword[not] keyword[None] keyword[and] identifier[upper] keyword[is] keyword[not] keyword[None] :
identifier[x] . identifier[ufuncs] . identifier[maximum] ( identifier[lower] , identifier[out] = identifier[out] )
identifier[out] . identifier[ufuncs] . identifier[minimum] ( identifier[upper] , identifier[out] = identifier[out] )
keyword[else] :
identifier[out] . identifier[assign] ( identifier[x] )
keyword[return] identifier[ProxOpBoxConstraint] | def proximal_box_constraint(space, lower=None, upper=None):
"""Proximal operator factory for ``G(x) = ind(a <= x <= b)``.
If P is the set of elements with a <= x <= b, the indicator function of
which is defined as::
ind(a <= x <= b) = {0 if x in P, infinity if x is not in P}
with x being an element in ``space``.
Parameters
----------
space : `LinearSpace`
Domain of the functional G(x)
lower : ``space.field`` element or ``space`` `element-like`, optional
The lower bound.
Default: ``None``, interpreted as -infinity
upper : ``space.field`` element or ``space`` `element-like`, optional
The upper bound.
Default: ``None``, interpreted as +infinity
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized
Notes
-----
If :math:`P` is an interval :math:`[a,b]`, the indicator function is
defined as
.. math::
I_{P}(x) = \\begin{cases}
0 & \\text{if } x \\in P, \\\\
\\infty & \\text{if } x \\not \\in P
\\end{cases}
For a step size :math:`\\sigma`, the proximal operator of
:math:`\\sigma I_{P}` is given by the projection onto the interval
.. math::
\\mathrm{prox}_{\\sigma I_{P}}(x) = \\begin{cases}
a & \\text{if } x < a, \\\\
x & \\text{if } x \\in [a,b], \\\\
b & \\text{if } x > b.
\\end{cases}
The proximal operator is independent of :math:`\\sigma` and invariant under
a positive rescaling of :math:`I_{P}(x)`, since that leaves the indicator
function unchanged.
For spaces of the form :math:`R^n`, the definition extends naturally
in each component.
See Also
--------
proximal_nonnegativity : Special case with ``lower=0, upper=infty``
"""
# Convert element-likes if needed, also does some space checking
if lower is not None and lower not in space and (lower not in space.field):
lower = space.element(lower) # depends on [control=['if'], data=[]]
if upper is not None and upper not in space and (upper not in space.field):
upper = space.element(upper) # depends on [control=['if'], data=[]]
if lower in space.field and upper in space.field:
if lower > upper:
raise ValueError('invalid values, `lower` ({}) > `upper` ({})'.format(lower, upper)) # depends on [control=['if'], data=['lower', 'upper']] # depends on [control=['if'], data=[]]
class ProxOpBoxConstraint(Operator):
"""Proximal operator for G(x) = ind(a <= x <= b)."""
def __init__(self, sigma):
"""Initialize a new instance.
Parameters
----------
sigma : positive float
Step size parameter, not used.
"""
super(ProxOpBoxConstraint, self).__init__(domain=space, range=space, linear=False)
def _call(self, x, out):
"""Apply the operator to ``x`` and store the result in ``out``."""
if lower is not None and upper is None:
x.ufuncs.maximum(lower, out=out) # depends on [control=['if'], data=[]]
elif lower is None and upper is not None:
x.ufuncs.minimum(upper, out=out) # depends on [control=['if'], data=[]]
elif lower is not None and upper is not None:
x.ufuncs.maximum(lower, out=out)
out.ufuncs.minimum(upper, out=out) # depends on [control=['if'], data=[]]
else:
out.assign(x)
return ProxOpBoxConstraint |
def juju_state_to_yaml(yaml_path, namespace_separator=':',
                       allow_hyphens_in_keys=True, mode=None):
    """Update the juju config and state in a yaml file.

    This includes any current relation-get data, and the charm
    directory.

    This function was created for the ansible and saltstack
    support, as those libraries can use a yaml file to supply
    context to templates, but it may be useful generally to
    create and update an on-disk cache of all the config, including
    previous relation data.

    By default, hyphens are allowed in keys as this is supported
    by yaml, but for tools like ansible, hyphens are not valid [1].

    [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name

    :param yaml_path: path of the yaml cache file to create or update.
    :param namespace_separator: separator placed between a relation name
        and its keys when merging relation data.
    :param allow_hyphens_in_keys: when False, config keys are rewritten
        without hyphens (required by tools such as ansible).
    :param mode: optional permission bits (e.g. ``0o600``) applied to the
        cache file.
    """
    config = charmhelpers.core.hookenv.config()
    # Add the charm_dir which we will need to refer to charm
    # file resources etc.
    config['charm_dir'] = charm_dir
    config['local_unit'] = charmhelpers.core.hookenv.local_unit()
    config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
    config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
        'public-address'
    )
    # Don't use non-standard tags for unicode which will not
    # work when salt uses yaml.load_safe.
    yaml.add_representer(six.text_type,
                         lambda dumper, value: dumper.represent_scalar(
                             six.u('tag:yaml.org,2002:str'), value))
    yaml_dir = os.path.dirname(yaml_path)
    # Guard against a bare filename: dirname('') would make
    # os.makedirs('') raise.
    if yaml_dir and not os.path.exists(yaml_dir):
        os.makedirs(yaml_dir)
    if os.path.exists(yaml_path):
        with open(yaml_path, "r") as existing_vars_file:
            # safe_load: the cache holds plain scalar/mapping data written
            # by yaml.dump below, so full yaml.load (which can construct
            # arbitrary Python objects and is deprecated without an
            # explicit Loader) is unnecessary and unsafe. An empty file
            # loads as None, in which case start from an empty dict
            # instead of crashing in existing_vars.update() below.
            existing_vars = yaml.safe_load(existing_vars_file.read()) or {}
    else:
        # Create the (empty) cache file so chmod below has a target.
        with open(yaml_path, "w+"):
            pass
        existing_vars = {}
    if mode is not None:
        os.chmod(yaml_path, mode)
    if not allow_hyphens_in_keys:
        config = dict_keys_without_hyphens(config)
    existing_vars.update(config)
    update_relations(existing_vars, namespace_separator)
    with open(yaml_path, "w+") as fp:
        fp.write(yaml.dump(existing_vars, default_flow_style=False))
constant[Update the juju config and state in a yaml file.
This includes any current relation-get data, and the charm
directory.
This function was created for the ansible and saltstack
support, as those libraries can use a yaml file to supply
context to templates, but it may be useful generally to
create and update an on-disk cache of all the config, including
previous relation data.
By default, hyphens are allowed in keys as this is supported
by yaml, but for tools like ansible, hyphens are not valid [1].
[1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
]
variable[config] assign[=] call[name[charmhelpers].core.hookenv.config, parameter[]]
call[name[config]][constant[charm_dir]] assign[=] name[charm_dir]
call[name[config]][constant[local_unit]] assign[=] call[name[charmhelpers].core.hookenv.local_unit, parameter[]]
call[name[config]][constant[unit_private_address]] assign[=] call[name[charmhelpers].core.hookenv.unit_private_ip, parameter[]]
call[name[config]][constant[unit_public_address]] assign[=] call[name[charmhelpers].core.hookenv.unit_get, parameter[constant[public-address]]]
call[name[yaml].add_representer, parameter[name[six].text_type, <ast.Lambda object at 0x7da18f810a90>]]
variable[yaml_dir] assign[=] call[name[os].path.dirname, parameter[name[yaml_path]]]
if <ast.UnaryOp object at 0x7da2054a5f30> begin[:]
call[name[os].makedirs, parameter[name[yaml_dir]]]
if call[name[os].path.exists, parameter[name[yaml_path]]] begin[:]
with call[name[open], parameter[name[yaml_path], constant[r]]] begin[:]
variable[existing_vars] assign[=] call[name[yaml].load, parameter[call[name[existing_vars_file].read, parameter[]]]]
if compare[name[mode] is_not constant[None]] begin[:]
call[name[os].chmod, parameter[name[yaml_path], name[mode]]]
if <ast.UnaryOp object at 0x7da2054a79d0> begin[:]
variable[config] assign[=] call[name[dict_keys_without_hyphens], parameter[name[config]]]
call[name[existing_vars].update, parameter[name[config]]]
call[name[update_relations], parameter[name[existing_vars], name[namespace_separator]]]
with call[name[open], parameter[name[yaml_path], constant[w+]]] begin[:]
call[name[fp].write, parameter[call[name[yaml].dump, parameter[name[existing_vars]]]]] | keyword[def] identifier[juju_state_to_yaml] ( identifier[yaml_path] , identifier[namespace_separator] = literal[string] ,
identifier[allow_hyphens_in_keys] = keyword[True] , identifier[mode] = keyword[None] ):
literal[string]
identifier[config] = identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[config] ()
identifier[config] [ literal[string] ]= identifier[charm_dir]
identifier[config] [ literal[string] ]= identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[local_unit] ()
identifier[config] [ literal[string] ]= identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[unit_private_ip] ()
identifier[config] [ literal[string] ]= identifier[charmhelpers] . identifier[core] . identifier[hookenv] . identifier[unit_get] (
literal[string]
)
identifier[yaml] . identifier[add_representer] ( identifier[six] . identifier[text_type] ,
keyword[lambda] identifier[dumper] , identifier[value] : identifier[dumper] . identifier[represent_scalar] (
identifier[six] . identifier[u] ( literal[string] ), identifier[value] ))
identifier[yaml_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[yaml_path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[yaml_dir] ):
identifier[os] . identifier[makedirs] ( identifier[yaml_dir] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[yaml_path] ):
keyword[with] identifier[open] ( identifier[yaml_path] , literal[string] ) keyword[as] identifier[existing_vars_file] :
identifier[existing_vars] = identifier[yaml] . identifier[load] ( identifier[existing_vars_file] . identifier[read] ())
keyword[else] :
keyword[with] identifier[open] ( identifier[yaml_path] , literal[string] ):
keyword[pass]
identifier[existing_vars] ={}
keyword[if] identifier[mode] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[chmod] ( identifier[yaml_path] , identifier[mode] )
keyword[if] keyword[not] identifier[allow_hyphens_in_keys] :
identifier[config] = identifier[dict_keys_without_hyphens] ( identifier[config] )
identifier[existing_vars] . identifier[update] ( identifier[config] )
identifier[update_relations] ( identifier[existing_vars] , identifier[namespace_separator] )
keyword[with] identifier[open] ( identifier[yaml_path] , literal[string] ) keyword[as] identifier[fp] :
identifier[fp] . identifier[write] ( identifier[yaml] . identifier[dump] ( identifier[existing_vars] , identifier[default_flow_style] = keyword[False] )) | def juju_state_to_yaml(yaml_path, namespace_separator=':', allow_hyphens_in_keys=True, mode=None):
"""Update the juju config and state in a yaml file.
This includes any current relation-get data, and the charm
directory.
This function was created for the ansible and saltstack
support, as those libraries can use a yaml file to supply
context to templates, but it may be useful generally to
create and update an on-disk cache of all the config, including
previous relation data.
By default, hyphens are allowed in keys as this is supported
by yaml, but for tools like ansible, hyphens are not valid [1].
[1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
"""
config = charmhelpers.core.hookenv.config()
# Add the charm_dir which we will need to refer to charm
# file resources etc.
config['charm_dir'] = charm_dir
config['local_unit'] = charmhelpers.core.hookenv.local_unit()
config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
config['unit_public_address'] = charmhelpers.core.hookenv.unit_get('public-address')
# Don't use non-standard tags for unicode which will not
# work when salt uses yaml.load_safe.
yaml.add_representer(six.text_type, lambda dumper, value: dumper.represent_scalar(six.u('tag:yaml.org,2002:str'), value))
yaml_dir = os.path.dirname(yaml_path)
if not os.path.exists(yaml_dir):
os.makedirs(yaml_dir) # depends on [control=['if'], data=[]]
if os.path.exists(yaml_path):
with open(yaml_path, 'r') as existing_vars_file:
existing_vars = yaml.load(existing_vars_file.read()) # depends on [control=['with'], data=['existing_vars_file']] # depends on [control=['if'], data=[]]
else:
with open(yaml_path, 'w+'):
pass # depends on [control=['with'], data=[]]
existing_vars = {}
if mode is not None:
os.chmod(yaml_path, mode) # depends on [control=['if'], data=['mode']]
if not allow_hyphens_in_keys:
config = dict_keys_without_hyphens(config) # depends on [control=['if'], data=[]]
existing_vars.update(config)
update_relations(existing_vars, namespace_separator)
with open(yaml_path, 'w+') as fp:
fp.write(yaml.dump(existing_vars, default_flow_style=False)) # depends on [control=['with'], data=['fp']] |
def create_storage_account(self, service_name, description, label,
                               affinity_group=None, location=None,
                               geo_replication_enabled=None,
                               extended_properties=None,
                               account_type='Standard_GRS'):
        '''
        Creates a new storage account in Windows Azure.
        service_name:
            A name for the storage account that is unique within Windows Azure.
            Storage account names must be between 3 and 24 characters in length
            and use numbers and lower-case letters only.
        description:
            A description for the storage account. The description may be up
            to 1024 characters in length.
        label:
            A name for the storage account. The name may be up to 100
            characters in length. The name can be used to identify the storage
            account for your tracking purposes.
        affinity_group:
            The name of an existing affinity group in the specified
            subscription. You can specify either a location or affinity_group,
            but not both.
        location:
            The location where the storage account is created. You can specify
            either a location or affinity_group, but not both.
        geo_replication_enabled:
            Deprecated. Replaced by the account_type parameter.
        extended_properties:
            Dictionary containing name/value pairs of storage account
            properties. You can have a maximum of 50 extended property
            name/value pairs. The maximum length of the Name element is 64
            characters, only alphanumeric characters and underscores are valid
            in the Name, and the name must start with a letter. The value has
            a maximum length of 255 characters.
        account_type:
            Specifies whether the account supports locally-redundant storage,
            geo-redundant storage, zone-redundant storage, or read access
            geo-redundant storage.
            Possible values are:
            Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS
        '''
        # Required parameters must be present before the request body is built.
        _validate_not_none('service_name', service_name)
        _validate_not_none('description', description)
        _validate_not_none('label', label)
        # Exactly one placement target is required: location XOR affinity_group.
        if affinity_group is None and location is None:
            raise ValueError(
                'location or affinity_group must be specified')
        if affinity_group is not None and location is not None:
            raise ValueError(
                'Only one of location or affinity_group needs to be specified')
        # Legacy flag: explicitly disabling geo-replication downgrades the
        # account to locally-redundant storage.
        # NOTE(review): '== False' (rather than 'is False') also matches other
        # falsy non-None values such as 0; behavior preserved as-is.
        if geo_replication_enabled == False:
            account_type = 'Standard_LRS'
        # Serialize to the management API's XML body; the call is asynchronous
        # (as_async=True), so the caller receives a request to poll.
        return self._perform_post(
            self._get_storage_service_path(),
            _XmlSerializer.create_storage_service_input_to_xml(
                service_name,
                description,
                label,
                affinity_group,
                location,
                account_type,
                extended_properties),
            as_async=True) | def function[create_storage_account, parameter[self, service_name, description, label, affinity_group, location, geo_replication_enabled, extended_properties, account_type]]:
constant[
Creates a new storage account in Windows Azure.
service_name:
A name for the storage account that is unique within Windows Azure.
Storage account names must be between 3 and 24 characters in length
and use numbers and lower-case letters only.
description:
A description for the storage account. The description may be up
to 1024 characters in length.
label:
A name for the storage account. The name may be up to 100
characters in length. The name can be used to identify the storage
account for your tracking purposes.
affinity_group:
The name of an existing affinity group in the specified
subscription. You can specify either a location or affinity_group,
but not both.
location:
The location where the storage account is created. You can specify
either a location or affinity_group, but not both.
geo_replication_enabled:
Deprecated. Replaced by the account_type parameter.
extended_properties:
Dictionary containing name/value pairs of storage account
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
account_type:
Specifies whether the account supports locally-redundant storage,
geo-redundant storage, zone-redundant storage, or read access
geo-redundant storage.
Possible values are:
Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS
]
call[name[_validate_not_none], parameter[constant[service_name], name[service_name]]]
call[name[_validate_not_none], parameter[constant[description], name[description]]]
call[name[_validate_not_none], parameter[constant[label], name[label]]]
if <ast.BoolOp object at 0x7da18eb55120> begin[:]
<ast.Raise object at 0x7da18eb56b00>
if <ast.BoolOp object at 0x7da18eb56320> begin[:]
<ast.Raise object at 0x7da18eb55300>
if compare[name[geo_replication_enabled] equal[==] constant[False]] begin[:]
variable[account_type] assign[=] constant[Standard_LRS]
return[call[name[self]._perform_post, parameter[call[name[self]._get_storage_service_path, parameter[]], call[name[_XmlSerializer].create_storage_service_input_to_xml, parameter[name[service_name], name[description], name[label], name[affinity_group], name[location], name[account_type], name[extended_properties]]]]]] | keyword[def] identifier[create_storage_account] ( identifier[self] , identifier[service_name] , identifier[description] , identifier[label] ,
identifier[affinity_group] = keyword[None] , identifier[location] = keyword[None] ,
identifier[geo_replication_enabled] = keyword[None] ,
identifier[extended_properties] = keyword[None] ,
identifier[account_type] = literal[string] ):
literal[string]
identifier[_validate_not_none] ( literal[string] , identifier[service_name] )
identifier[_validate_not_none] ( literal[string] , identifier[description] )
identifier[_validate_not_none] ( literal[string] , identifier[label] )
keyword[if] identifier[affinity_group] keyword[is] keyword[None] keyword[and] identifier[location] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] identifier[affinity_group] keyword[is] keyword[not] keyword[None] keyword[and] identifier[location] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] identifier[geo_replication_enabled] == keyword[False] :
identifier[account_type] = literal[string]
keyword[return] identifier[self] . identifier[_perform_post] (
identifier[self] . identifier[_get_storage_service_path] (),
identifier[_XmlSerializer] . identifier[create_storage_service_input_to_xml] (
identifier[service_name] ,
identifier[description] ,
identifier[label] ,
identifier[affinity_group] ,
identifier[location] ,
identifier[account_type] ,
identifier[extended_properties] ),
identifier[as_async] = keyword[True] ) | def create_storage_account(self, service_name, description, label, affinity_group=None, location=None, geo_replication_enabled=None, extended_properties=None, account_type='Standard_GRS'):
"""
Creates a new storage account in Windows Azure.
service_name:
A name for the storage account that is unique within Windows Azure.
Storage account names must be between 3 and 24 characters in length
and use numbers and lower-case letters only.
description:
A description for the storage account. The description may be up
to 1024 characters in length.
label:
A name for the storage account. The name may be up to 100
characters in length. The name can be used to identify the storage
account for your tracking purposes.
affinity_group:
The name of an existing affinity group in the specified
subscription. You can specify either a location or affinity_group,
but not both.
location:
The location where the storage account is created. You can specify
either a location or affinity_group, but not both.
geo_replication_enabled:
Deprecated. Replaced by the account_type parameter.
extended_properties:
Dictionary containing name/value pairs of storage account
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
account_type:
Specifies whether the account supports locally-redundant storage,
geo-redundant storage, zone-redundant storage, or read access
geo-redundant storage.
Possible values are:
Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS
"""
_validate_not_none('service_name', service_name)
_validate_not_none('description', description)
_validate_not_none('label', label)
if affinity_group is None and location is None:
raise ValueError('location or affinity_group must be specified') # depends on [control=['if'], data=[]]
if affinity_group is not None and location is not None:
raise ValueError('Only one of location or affinity_group needs to be specified') # depends on [control=['if'], data=[]]
if geo_replication_enabled == False:
account_type = 'Standard_LRS' # depends on [control=['if'], data=[]]
return self._perform_post(self._get_storage_service_path(), _XmlSerializer.create_storage_service_input_to_xml(service_name, description, label, affinity_group, location, account_type, extended_properties), as_async=True) |
def render(self, progress, width=None, status=None):
        """Render the widget.

        Formats ``progress`` (a fraction, 0.0-1.0) as a right-aligned
        percentage string. ``width`` and ``status`` are accepted for the
        widget interface but are not used here.
        """
        # +0.1 guards against float truncation (e.g. 29.999... -> 30, not 29).
        current_pct = int(progress * 100 + 0.1)
        # next_progress: the fraction at which the rendered text next changes.
        return RenderResult(rendered="%3d%%" % current_pct, next_progress=(current_pct + 1) / 100) | def function[render, parameter[self, progress, width, status]]:
constant[Render the widget.]
variable[current_pct] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[progress] * constant[100]] + constant[0.1]]]]
return[call[name[RenderResult], parameter[]]] | keyword[def] identifier[render] ( identifier[self] , identifier[progress] , identifier[width] = keyword[None] , identifier[status] = keyword[None] ):
literal[string]
identifier[current_pct] = identifier[int] ( identifier[progress] * literal[int] + literal[int] )
keyword[return] identifier[RenderResult] ( identifier[rendered] = literal[string] % identifier[current_pct] , identifier[next_progress] =( identifier[current_pct] + literal[int] )/ literal[int] ) | def render(self, progress, width=None, status=None):
"""Render the widget."""
current_pct = int(progress * 100 + 0.1)
return RenderResult(rendered='%3d%%' % current_pct, next_progress=(current_pct + 1) / 100) |
def get_logits(self, x):
    """
    Return the symbolic logits for input ``x``.

    :param x: A symbolic representation of the network input.
    :return: A symbolic representation of the logits
    """
    logits_name = self._get_logits_name()
    logits_layer = self.get_layer(x, logits_name)
    # Need to deal with the case where softmax is part of the
    # logits layer
    if logits_name == self._get_softmax_name():
      softmax_logit_layer = self.get_layer(x, logits_name)
      # The final op is the softmax. Return its input
      # (the pre-softmax activations, i.e. the actual logits).
      logits_layer = softmax_logit_layer._op.inputs[0]
    return logits_layer | def function[get_logits, parameter[self, x]]:
constant[
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the logits
]
variable[logits_name] assign[=] call[name[self]._get_logits_name, parameter[]]
variable[logits_layer] assign[=] call[name[self].get_layer, parameter[name[x], name[logits_name]]]
if compare[name[logits_name] equal[==] call[name[self]._get_softmax_name, parameter[]]] begin[:]
variable[softmax_logit_layer] assign[=] call[name[self].get_layer, parameter[name[x], name[logits_name]]]
variable[logits_layer] assign[=] call[name[softmax_logit_layer]._op.inputs][constant[0]]
return[name[logits_layer]] | keyword[def] identifier[get_logits] ( identifier[self] , identifier[x] ):
literal[string]
identifier[logits_name] = identifier[self] . identifier[_get_logits_name] ()
identifier[logits_layer] = identifier[self] . identifier[get_layer] ( identifier[x] , identifier[logits_name] )
keyword[if] identifier[logits_name] == identifier[self] . identifier[_get_softmax_name] ():
identifier[softmax_logit_layer] = identifier[self] . identifier[get_layer] ( identifier[x] , identifier[logits_name] )
identifier[logits_layer] = identifier[softmax_logit_layer] . identifier[_op] . identifier[inputs] [ literal[int] ]
keyword[return] identifier[logits_layer] | def get_logits(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the logits
"""
logits_name = self._get_logits_name()
logits_layer = self.get_layer(x, logits_name)
# Need to deal with the case where softmax is part of the
# logits layer
if logits_name == self._get_softmax_name():
softmax_logit_layer = self.get_layer(x, logits_name)
# The final op is the softmax. Return its input
logits_layer = softmax_logit_layer._op.inputs[0] # depends on [control=['if'], data=['logits_name']]
return logits_layer |
def groupBy(self, *cols):
        """Groups the :class:`DataFrame` using the specified columns,
        so we can run aggregation on them. See :class:`GroupedData`
        for all the available aggregate functions.
        :func:`groupby` is an alias for :func:`groupBy`.
        :param cols: list of columns to group by.
            Each element should be a column name (string) or an expression (:class:`Column`).
        >>> df.groupBy().avg().collect()
        [Row(avg(age)=3.5)]
        >>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
        [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
        >>> sorted(df.groupBy(df.name).avg().collect())
        [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
        >>> sorted(df.groupBy(['name', df.age]).count().collect())
        [Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
        """
        # _jcols converts the names / Column objects to a JVM column sequence
        # before delegating the grouping to the Java-side DataFrame.
        jgd = self._jdf.groupBy(self._jcols(*cols))
        # Imported locally, presumably to avoid a circular import between the
        # dataframe and group modules -- TODO confirm.
        from pyspark.sql.group import GroupedData
        return GroupedData(jgd, self) | def function[groupBy, parameter[self]]:
constant[Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
]
variable[jgd] assign[=] call[name[self]._jdf.groupBy, parameter[call[name[self]._jcols, parameter[<ast.Starred object at 0x7da20e955600>]]]]
from relative_module[pyspark.sql.group] import module[GroupedData]
return[call[name[GroupedData], parameter[name[jgd], name[self]]]] | keyword[def] identifier[groupBy] ( identifier[self] ,* identifier[cols] ):
literal[string]
identifier[jgd] = identifier[self] . identifier[_jdf] . identifier[groupBy] ( identifier[self] . identifier[_jcols] (* identifier[cols] ))
keyword[from] identifier[pyspark] . identifier[sql] . identifier[group] keyword[import] identifier[GroupedData]
keyword[return] identifier[GroupedData] ( identifier[jgd] , identifier[self] ) | def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self) |
def WriteCronJobRun(self, run_object):
    """Stores a cron job run object in the database."""
    # Runs may only be written for jobs already registered in this store.
    if run_object.cron_job_id not in self.cronjobs:
      raise db.UnknownCronJobError("Job with id %s not found." %
                                   run_object.cron_job_id)
    # Store a copy so later mutations by the caller don't leak into the store.
    clone = run_object.Copy()
    # Stamp the record with the write time, mirroring a real DB insert.
    clone.timestamp = rdfvalue.RDFDatetime.Now()
    self.cronjob_runs[(clone.cron_job_id, clone.run_id)] = clone | def function[WriteCronJobRun, parameter[self, run_object]]:
constant[Stores a cron job run object in the database.]
if compare[name[run_object].cron_job_id <ast.NotIn object at 0x7da2590d7190> name[self].cronjobs] begin[:]
<ast.Raise object at 0x7da1b1b87400>
variable[clone] assign[=] call[name[run_object].Copy, parameter[]]
name[clone].timestamp assign[=] call[name[rdfvalue].RDFDatetime.Now, parameter[]]
call[name[self].cronjob_runs][tuple[[<ast.Attribute object at 0x7da1b1d90e50>, <ast.Attribute object at 0x7da1b1d91030>]]] assign[=] name[clone] | keyword[def] identifier[WriteCronJobRun] ( identifier[self] , identifier[run_object] ):
literal[string]
keyword[if] identifier[run_object] . identifier[cron_job_id] keyword[not] keyword[in] identifier[self] . identifier[cronjobs] :
keyword[raise] identifier[db] . identifier[UnknownCronJobError] ( literal[string] %
identifier[run_object] . identifier[cron_job_id] )
identifier[clone] = identifier[run_object] . identifier[Copy] ()
identifier[clone] . identifier[timestamp] = identifier[rdfvalue] . identifier[RDFDatetime] . identifier[Now] ()
identifier[self] . identifier[cronjob_runs] [( identifier[clone] . identifier[cron_job_id] , identifier[clone] . identifier[run_id] )]= identifier[clone] | def WriteCronJobRun(self, run_object):
"""Stores a cron job run object in the database."""
if run_object.cron_job_id not in self.cronjobs:
raise db.UnknownCronJobError('Job with id %s not found.' % run_object.cron_job_id) # depends on [control=['if'], data=[]]
clone = run_object.Copy()
clone.timestamp = rdfvalue.RDFDatetime.Now()
self.cronjob_runs[clone.cron_job_id, clone.run_id] = clone |
def StringEscape(self, string, match, **_):
    """Escape backslashes found inside a string quote.
    Backslashes followed by anything other than [\'"rnbt] will raise an Error.
    Args:
      string: The string that matched.
      match: The match object (m.group(1) is the escaped code)
    Raises:
      ParseError: For strings other than those used to define a regexp, raise an
        error if the escaped string is not one of [\'"rnbt].
    """
    precondition.AssertType(string, Text)
    # Allow unfiltered strings for regexp operations so that escaped special
    # characters (e.g. \*) or special sequences (e.g. \w) can be used in
    # objectfilter.
    if self.current_expression.operator == "regexp":
      self.string += compatibility.UnescapeString(string)
    elif match.group(1) in "\\'\"rnbt":
      # Outside regexp mode only this small whitelist of escapes is honoured.
      self.string += compatibility.UnescapeString(string)
    else:
      raise ParseError("Invalid escape character %s." % string) | def function[StringEscape, parameter[self, string, match]]:
constant[Escape backslashes found inside a string quote.
Backslashes followed by anything other than ['"rnbt] will raise an Error.
Args:
string: The string that matched.
match: The match object (m.group(1) is the escaped code)
Raises:
ParseError: For strings other than those used to define a regexp, raise an
error if the escaped string is not one of ['"rnbt].
]
call[name[precondition].AssertType, parameter[name[string], name[Text]]]
if compare[name[self].current_expression.operator equal[==] constant[regexp]] begin[:]
<ast.AugAssign object at 0x7da204623d30> | keyword[def] identifier[StringEscape] ( identifier[self] , identifier[string] , identifier[match] ,** identifier[_] ):
literal[string]
identifier[precondition] . identifier[AssertType] ( identifier[string] , identifier[Text] )
keyword[if] identifier[self] . identifier[current_expression] . identifier[operator] == literal[string] :
identifier[self] . identifier[string] += identifier[compatibility] . identifier[UnescapeString] ( identifier[string] )
keyword[elif] identifier[match] . identifier[group] ( literal[int] ) keyword[in] literal[string] :
identifier[self] . identifier[string] += identifier[compatibility] . identifier[UnescapeString] ( identifier[string] )
keyword[else] :
keyword[raise] identifier[ParseError] ( literal[string] % identifier[string] ) | def StringEscape(self, string, match, **_):
"""Escape backslashes found inside a string quote.
Backslashes followed by anything other than ['"rnbt] will raise an Error.
Args:
string: The string that matched.
match: The match object (m.group(1) is the escaped code)
Raises:
ParseError: For strings other than those used to define a regexp, raise an
error if the escaped string is not one of ['"rnbt].
"""
precondition.AssertType(string, Text)
# Allow unfiltered strings for regexp operations so that escaped special
# characters (e.g. \*) or special sequences (e.g. \w) can be used in
# objectfilter.
if self.current_expression.operator == 'regexp':
self.string += compatibility.UnescapeString(string) # depends on [control=['if'], data=[]]
elif match.group(1) in '\\\'"rnbt':
self.string += compatibility.UnescapeString(string) # depends on [control=['if'], data=[]]
else:
raise ParseError('Invalid escape character %s.' % string) |
def get_bundle(self, bundle_name, extensions=None):
        """ Get all the chunks contained in a bundle """
        # Chunk data is only usable once webpack reports a finished build.
        if self.stats.get('status') == 'done':
            bundle = self.stats.get('chunks', {}).get(bundle_name, None)
            if bundle is None:
                raise KeyError('No such bundle {0!r}.'.format(bundle_name))
            # Filter chunks by the requested extensions, then attach URLs.
            test = self._chunk_filter(extensions)
            return [self._add_url(c) for c in bundle if test(c)]
        elif self.stats.get('status') == 'error':
            # Surface the compile error recorded in the stats file.
            raise RuntimeError("{error}: {message}".format(**self.stats))
        else:
            # Any other status means the stats file is stale or malformed.
            raise RuntimeError(
                "Bad webpack stats file {0} status: {1!r}"
                .format(self.state.stats_file, self.stats.get('status'))) | def function[get_bundle, parameter[self, bundle_name, extensions]]:
constant[ Get all the chunks contained in a bundle ]
if compare[call[name[self].stats.get, parameter[constant[status]]] equal[==] constant[done]] begin[:]
variable[bundle] assign[=] call[call[name[self].stats.get, parameter[constant[chunks], dictionary[[], []]]].get, parameter[name[bundle_name], constant[None]]]
if compare[name[bundle] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b10378b0>
variable[test] assign[=] call[name[self]._chunk_filter, parameter[name[extensions]]]
return[<ast.ListComp object at 0x7da1b1034430>] | keyword[def] identifier[get_bundle] ( identifier[self] , identifier[bundle_name] , identifier[extensions] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[stats] . identifier[get] ( literal[string] )== literal[string] :
identifier[bundle] = identifier[self] . identifier[stats] . identifier[get] ( literal[string] ,{}). identifier[get] ( identifier[bundle_name] , keyword[None] )
keyword[if] identifier[bundle] keyword[is] keyword[None] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[bundle_name] ))
identifier[test] = identifier[self] . identifier[_chunk_filter] ( identifier[extensions] )
keyword[return] [ identifier[self] . identifier[_add_url] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[bundle] keyword[if] identifier[test] ( identifier[c] )]
keyword[elif] identifier[self] . identifier[stats] . identifier[get] ( literal[string] )== literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] (** identifier[self] . identifier[stats] ))
keyword[else] :
keyword[raise] identifier[RuntimeError] (
literal[string]
. identifier[format] ( identifier[self] . identifier[state] . identifier[stats_file] , identifier[self] . identifier[stats] . identifier[get] ( literal[string] ))) | def get_bundle(self, bundle_name, extensions=None):
""" Get all the chunks contained in a bundle """
if self.stats.get('status') == 'done':
bundle = self.stats.get('chunks', {}).get(bundle_name, None)
if bundle is None:
raise KeyError('No such bundle {0!r}.'.format(bundle_name)) # depends on [control=['if'], data=[]]
test = self._chunk_filter(extensions)
return [self._add_url(c) for c in bundle if test(c)] # depends on [control=['if'], data=[]]
elif self.stats.get('status') == 'error':
raise RuntimeError('{error}: {message}'.format(**self.stats)) # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Bad webpack stats file {0} status: {1!r}'.format(self.state.stats_file, self.stats.get('status'))) |
def deploy_hypervisor(self):
        """Install the libvirtd and instack-undercloud packages.
        """
        # Full libvirt stack (daemon, storage/network/qemu drivers, client
        # tooling) plus the TripleO/instack packages for the undercloud.
        self.yum_install(['libvirt-daemon-driver-nwfilter', 'libvirt-client', 'libvirt-daemon-config-network', 'libvirt-daemon-driver-nodedev', 'libvirt-daemon-kvm', 'libvirt-python', 'libvirt-daemon-config-nwfilter', 'libvirt-glib', 'libvirt-daemon', 'libvirt-daemon-driver-storage', 'libvirt', 'libvirt-daemon-driver-network', 'libvirt-devel', 'libvirt-gobject', 'libvirt-daemon-driver-secret', 'libvirt-daemon-driver-qemu', 'libvirt-daemon-driver-interface', 'libguestfs-tools', 'virt-install', 'genisoimage', 'openstack-tripleo', 'instack-undercloud'])
        # Uncomment auth_unix_rw in libvirtd.conf to allow read-write access
        # on libvirt's unix socket.
        self.run('sed -i "s,#auth_unix_rw,auth_unix_rw," /etc/libvirt/libvirtd.conf')
        self.run('systemctl start libvirtd')
        self.run('systemctl status libvirtd')
        self.install_base_packages()
        self.clean_system()
        self.yum_update() | def function[deploy_hypervisor, parameter[self]]:
constant[Install the libvirtd and instack-undercloud packages.
]
call[name[self].yum_install, parameter[list[[<ast.Constant object at 0x7da204344550>, <ast.Constant object at 0x7da2043475e0>, <ast.Constant object at 0x7da204344cd0>, <ast.Constant object at 0x7da2043473d0>, <ast.Constant object at 0x7da204345ab0>, <ast.Constant object at 0x7da204346470>, <ast.Constant object at 0x7da204344430>, <ast.Constant object at 0x7da204347160>, <ast.Constant object at 0x7da204345960>, <ast.Constant object at 0x7da204344e80>, <ast.Constant object at 0x7da204344eb0>, <ast.Constant object at 0x7da204346fb0>, <ast.Constant object at 0x7da204346740>, <ast.Constant object at 0x7da2043478b0>, <ast.Constant object at 0x7da204344bb0>, <ast.Constant object at 0x7da204344970>, <ast.Constant object at 0x7da2043444f0>, <ast.Constant object at 0x7da2043440a0>, <ast.Constant object at 0x7da204346770>, <ast.Constant object at 0x7da204346230>, <ast.Constant object at 0x7da2043467a0>, <ast.Constant object at 0x7da204345570>]]]]
call[name[self].run, parameter[constant[sed -i "s,#auth_unix_rw,auth_unix_rw," /etc/libvirt/libvirtd.conf]]]
call[name[self].run, parameter[constant[systemctl start libvirtd]]]
call[name[self].run, parameter[constant[systemctl status libvirtd]]]
call[name[self].install_base_packages, parameter[]]
call[name[self].clean_system, parameter[]]
call[name[self].yum_update, parameter[]] | keyword[def] identifier[deploy_hypervisor] ( identifier[self] ):
literal[string]
identifier[self] . identifier[yum_install] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ])
identifier[self] . identifier[run] ( literal[string] )
identifier[self] . identifier[run] ( literal[string] )
identifier[self] . identifier[run] ( literal[string] )
identifier[self] . identifier[install_base_packages] ()
identifier[self] . identifier[clean_system] ()
identifier[self] . identifier[yum_update] () | def deploy_hypervisor(self):
"""Install the libvirtd and instack-undercloud packages.
"""
self.yum_install(['libvirt-daemon-driver-nwfilter', 'libvirt-client', 'libvirt-daemon-config-network', 'libvirt-daemon-driver-nodedev', 'libvirt-daemon-kvm', 'libvirt-python', 'libvirt-daemon-config-nwfilter', 'libvirt-glib', 'libvirt-daemon', 'libvirt-daemon-driver-storage', 'libvirt', 'libvirt-daemon-driver-network', 'libvirt-devel', 'libvirt-gobject', 'libvirt-daemon-driver-secret', 'libvirt-daemon-driver-qemu', 'libvirt-daemon-driver-interface', 'libguestfs-tools', 'virt-install', 'genisoimage', 'openstack-tripleo', 'instack-undercloud'])
self.run('sed -i "s,#auth_unix_rw,auth_unix_rw," /etc/libvirt/libvirtd.conf')
self.run('systemctl start libvirtd')
self.run('systemctl status libvirtd')
self.install_base_packages()
self.clean_system()
self.yum_update() |
def export(self, view_datas):
        """export view datas to registered exporters"""
        # Export finalized copies so exporters cannot mutate live view data.
        view_datas_copy = \
            [self.copy_and_finalize_view_data(vd) for vd in view_datas]
        if len(self.exporters) > 0:
            for e in self.exporters:
                try:
                    e.export(view_datas_copy)
                except AttributeError:
                    # NOTE(review): objects registered without an export()
                    # method are skipped silently -- confirm this is intended.
                    pass | def function[export, parameter[self, view_datas]]:
constant[export view datas to registered exporters]
variable[view_datas_copy] assign[=] <ast.ListComp object at 0x7da18c4cfbb0>
if compare[call[name[len], parameter[name[self].exporters]] greater[>] constant[0]] begin[:]
for taget[name[e]] in starred[name[self].exporters] begin[:]
<ast.Try object at 0x7da18c4cc250> | keyword[def] identifier[export] ( identifier[self] , identifier[view_datas] ):
literal[string]
identifier[view_datas_copy] =[ identifier[self] . identifier[copy_and_finalize_view_data] ( identifier[vd] ) keyword[for] identifier[vd] keyword[in] identifier[view_datas] ]
keyword[if] identifier[len] ( identifier[self] . identifier[exporters] )> literal[int] :
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[exporters] :
keyword[try] :
identifier[e] . identifier[export] ( identifier[view_datas_copy] )
keyword[except] identifier[AttributeError] :
keyword[pass] | def export(self, view_datas):
"""export view datas to registered exporters"""
view_datas_copy = [self.copy_and_finalize_view_data(vd) for vd in view_datas]
if len(self.exporters) > 0:
for e in self.exporters:
try:
e.export(view_datas_copy) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=[]] |
def full_route(self):
        '''The full :attr:`route` for this :class:`.Router`.
        It includes the :attr:`parent` portion of the route if a parent
        router is available.
        '''
        # Recurses up the parent chain, so the result is parent-first.
        if self._parent:
            return self._parent.full_route + self._route
        else:
            return self._route | def function[full_route, parameter[self]]:
constant[The full :attr:`route` for this :class:`.Router`.
It includes the :attr:`parent` portion of the route if a parent
router is available.
]
if name[self]._parent begin[:]
return[binary_operation[name[self]._parent.full_route + name[self]._route]] | keyword[def] identifier[full_route] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_parent] :
keyword[return] identifier[self] . identifier[_parent] . identifier[full_route] + identifier[self] . identifier[_route]
keyword[else] :
keyword[return] identifier[self] . identifier[_route] | def full_route(self):
"""The full :attr:`route` for this :class:`.Router`.
It includes the :attr:`parent` portion of the route if a parent
router is available.
"""
if self._parent:
return self._parent.full_route + self._route # depends on [control=['if'], data=[]]
else:
return self._route |
def init_nvidia(self):
        """Init the NVIDIA API."""
        if import_error_tag:
            self.nvml_ready = False
        # NOTE(review): execution falls through to the try block even when the
        # import failed above; the resulting NameError on pynvml is absorbed
        # by the broad except below. Works, but an early return would be
        # clearer -- confirm before changing.
        try:
            pynvml.nvmlInit()
            self.device_handles = get_device_handles()
            self.nvml_ready = True
        except Exception:
            logger.debug("pynvml could not be initialized.")
            self.nvml_ready = False
        return self.nvml_ready | def function[init_nvidia, parameter[self]]:
constant[Init the NVIDIA API.]
if name[import_error_tag] begin[:]
name[self].nvml_ready assign[=] constant[False]
<ast.Try object at 0x7da20c7c8400>
return[name[self].nvml_ready] | keyword[def] identifier[init_nvidia] ( identifier[self] ):
literal[string]
keyword[if] identifier[import_error_tag] :
identifier[self] . identifier[nvml_ready] = keyword[False]
keyword[try] :
identifier[pynvml] . identifier[nvmlInit] ()
identifier[self] . identifier[device_handles] = identifier[get_device_handles] ()
identifier[self] . identifier[nvml_ready] = keyword[True]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[nvml_ready] = keyword[False]
keyword[return] identifier[self] . identifier[nvml_ready] | def init_nvidia(self):
"""Init the NVIDIA API."""
if import_error_tag:
self.nvml_ready = False # depends on [control=['if'], data=[]]
try:
pynvml.nvmlInit()
self.device_handles = get_device_handles()
self.nvml_ready = True # depends on [control=['try'], data=[]]
except Exception:
logger.debug('pynvml could not be initialized.')
self.nvml_ready = False # depends on [control=['except'], data=[]]
return self.nvml_ready |
def share_application_with_accounts(application_id, account_ids, sar_client=None):
    """
    Share the application privately with given AWS account IDs.
    :param application_id: The Amazon Resource Name (ARN) of the application
    :type application_id: str
    :param account_ids: List of AWS account IDs, or *
    :type account_ids: list of str
    :param sar_client: The boto3 client used to access SAR
    :type sar_client: boto3.client
    :raises ValueError
    """
    if not (application_id and account_ids):
        raise ValueError('Require application id and list of AWS account IDs to share the app')

    client = sar_client if sar_client else boto3.client('serverlessrepo')

    # Grant the Deploy permission to every requested account.
    policy = ApplicationPolicy(account_ids, [ApplicationPolicy.DEPLOY])
    policy.validate()
    client.put_application_policy(
        ApplicationId=application_id,
        Statements=[policy.to_statement()],
    )
constant[
Share the application privately with given AWS account IDs.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param account_ids: List of AWS account IDs, or *
:type account_ids: list of str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
]
if <ast.BoolOp object at 0x7da1b1233400> begin[:]
<ast.Raise object at 0x7da1b1233250>
if <ast.UnaryOp object at 0x7da18f00fe50> begin[:]
variable[sar_client] assign[=] call[name[boto3].client, parameter[constant[serverlessrepo]]]
variable[application_policy] assign[=] call[name[ApplicationPolicy], parameter[name[account_ids], list[[<ast.Attribute object at 0x7da18f00c190>]]]]
call[name[application_policy].validate, parameter[]]
call[name[sar_client].put_application_policy, parameter[]] | keyword[def] identifier[share_application_with_accounts] ( identifier[application_id] , identifier[account_ids] , identifier[sar_client] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[application_id] keyword[or] keyword[not] identifier[account_ids] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[sar_client] :
identifier[sar_client] = identifier[boto3] . identifier[client] ( literal[string] )
identifier[application_policy] = identifier[ApplicationPolicy] ( identifier[account_ids] ,[ identifier[ApplicationPolicy] . identifier[DEPLOY] ])
identifier[application_policy] . identifier[validate] ()
identifier[sar_client] . identifier[put_application_policy] (
identifier[ApplicationId] = identifier[application_id] ,
identifier[Statements] =[ identifier[application_policy] . identifier[to_statement] ()]
) | def share_application_with_accounts(application_id, account_ids, sar_client=None):
"""
Share the application privately with given AWS account IDs.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param account_ids: List of AWS account IDs, or *
:type account_ids: list of str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not application_id or not account_ids:
raise ValueError('Require application id and list of AWS account IDs to share the app') # depends on [control=['if'], data=[]]
if not sar_client:
sar_client = boto3.client('serverlessrepo') # depends on [control=['if'], data=[]]
application_policy = ApplicationPolicy(account_ids, [ApplicationPolicy.DEPLOY])
application_policy.validate()
sar_client.put_application_policy(ApplicationId=application_id, Statements=[application_policy.to_statement()]) |
def create_versions(self, project_id, versions):
    """Create every version from a getVersions() result under *project_id*."""
    for version in versions:
        self.create_version(project_id, version)
constant[ Accepts result of getVersions()
]
for taget[name[v]] in starred[name[versions]] begin[:]
call[name[self].create_version, parameter[name[project_id], name[v]]] | keyword[def] identifier[create_versions] ( identifier[self] , identifier[project_id] , identifier[versions] ):
literal[string]
keyword[for] identifier[v] keyword[in] identifier[versions] :
identifier[self] . identifier[create_version] ( identifier[project_id] , identifier[v] ) | def create_versions(self, project_id, versions):
""" Accepts result of getVersions()
"""
for v in versions:
self.create_version(project_id, v) # depends on [control=['for'], data=['v']] |
def main_pred_type(self, value):
    """set main predicate combination type
    :param value: (character) One of ``equals`` (``=``), ``and`` (``&``), ``or`` (``|``),
        ``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``),
        ``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like``
    :raises ValueError: if *value* is neither a known operator name nor a
        recognized operator symbol
    """
    if value not in operators:
        # Allow symbolic forms (e.g. "=", "&") by mapping them to names;
        # an unknown symbol maps to None and is rejected below.
        value = operator_lkup.get(value)
    if value:
        self._main_pred_type = value
        self.payload['predicate']['type'] = self._main_pred_type
    else:
        # ValueError is more precise than a bare Exception and, being a
        # subclass, keeps any existing `except Exception` handlers working.
        raise ValueError("main predicate combiner not a valid operator")
constant[set main predicate combination type
:param value: (character) One of ``equals`` (``=``), ``and`` (``&``), ``or`` (``|``),
``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``),
``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like``
]
if compare[name[value] <ast.NotIn object at 0x7da2590d7190> name[operators]] begin[:]
variable[value] assign[=] call[name[operator_lkup].get, parameter[name[value]]]
if name[value] begin[:]
name[self]._main_pred_type assign[=] name[value]
call[call[name[self].payload][constant[predicate]]][constant[type]] assign[=] name[self]._main_pred_type | keyword[def] identifier[main_pred_type] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[not] keyword[in] identifier[operators] :
identifier[value] = identifier[operator_lkup] . identifier[get] ( identifier[value] )
keyword[if] identifier[value] :
identifier[self] . identifier[_main_pred_type] = identifier[value]
identifier[self] . identifier[payload] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[_main_pred_type]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] ) | def main_pred_type(self, value):
"""set main predicate combination type
:param value: (character) One of ``equals`` (``=``), ``and`` (``&``), ``or`` (``|``),
``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``),
``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like``
"""
if value not in operators:
value = operator_lkup.get(value) # depends on [control=['if'], data=['value']]
if value:
self._main_pred_type = value
self.payload['predicate']['type'] = self._main_pred_type # depends on [control=['if'], data=[]]
else:
raise Exception('main predicate combiner not a valid operator') |
def get_area_def(self, dsid):
    """Get area definition for message.
    If latlong grid then convert to valid eqc grid.

    :raises RuntimeError: if the GRIB message's projection information
        cannot be interpreted (original error is chained as the cause).
    """
    msg = self._get_message(self._msg_datasets[dsid])
    try:
        return self._area_def_from_msg(msg)
    except (RuntimeError, KeyError) as err:
        # Chain the underlying error so the real cause is not lost.
        raise RuntimeError("Unknown GRIB projection information") from err
raise RuntimeError("Unknown GRIB projection information") | def function[get_area_def, parameter[self, dsid]]:
constant[Get area definition for message.
If latlong grid then convert to valid eqc grid.
]
variable[msg] assign[=] call[name[self]._get_message, parameter[call[name[self]._msg_datasets][name[dsid]]]]
<ast.Try object at 0x7da1b22f8640> | keyword[def] identifier[get_area_def] ( identifier[self] , identifier[dsid] ):
literal[string]
identifier[msg] = identifier[self] . identifier[_get_message] ( identifier[self] . identifier[_msg_datasets] [ identifier[dsid] ])
keyword[try] :
keyword[return] identifier[self] . identifier[_area_def_from_msg] ( identifier[msg] )
keyword[except] ( identifier[RuntimeError] , identifier[KeyError] ):
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def get_area_def(self, dsid):
"""Get area definition for message.
If latlong grid then convert to valid eqc grid.
"""
msg = self._get_message(self._msg_datasets[dsid])
try:
return self._area_def_from_msg(msg) # depends on [control=['try'], data=[]]
except (RuntimeError, KeyError):
raise RuntimeError('Unknown GRIB projection information') # depends on [control=['except'], data=[]] |
def has_active_subscription(self, plan=None):
    """
    Checks to see if this customer has an active subscription to the given plan.
    :param plan: The plan for which to check for an active subscription. If plan is None and
        there exists only one active subscription, this method will check if that subscription
        is valid. Calling this method with no plan and multiple valid subscriptions for this customer will
        throw an exception.
    :type plan: Plan or string (plan ID)
    :returns: True if there exists an active subscription, False otherwise.
    :throws: TypeError if ``plan`` is None and more than one active subscription exists for this customer.
    """
    if plan is None:
        valid_subscriptions = self._get_valid_subscriptions()
        # With exactly one subscription the intent is unambiguous; with
        # several, the caller must name the plan they mean.
        if len(valid_subscriptions) == 0:
            return False
        elif len(valid_subscriptions) == 1:
            return True
        else:
            raise TypeError(
                "plan cannot be None if more than one valid subscription exists for this customer."
            )
    else:
        # Convert Plan to id
        if isinstance(plan, StripeModel):
            plan = plan.id
        # Generator expression short-circuits at the first valid
        # subscription instead of materializing the whole list first.
        return any(
            subscription.is_valid()
            for subscription in self.subscriptions.filter(plan__id=plan)
        )
constant[
Checks to see if this customer has an active subscription to the given plan.
:param plan: The plan for which to check for an active subscription. If plan is None and
there exists only one active subscription, this method will check if that subscription
is valid. Calling this method with no plan and multiple valid subscriptions for this customer will
throw an exception.
:type plan: Plan or string (plan ID)
:returns: True if there exists an active subscription, False otherwise.
:throws: TypeError if ``plan`` is None and more than one active subscription exists for this customer.
]
if compare[name[plan] is constant[None]] begin[:]
variable[valid_subscriptions] assign[=] call[name[self]._get_valid_subscriptions, parameter[]]
if compare[call[name[len], parameter[name[valid_subscriptions]]] equal[==] constant[0]] begin[:]
return[constant[False]] | keyword[def] identifier[has_active_subscription] ( identifier[self] , identifier[plan] = keyword[None] ):
literal[string]
keyword[if] identifier[plan] keyword[is] keyword[None] :
identifier[valid_subscriptions] = identifier[self] . identifier[_get_valid_subscriptions] ()
keyword[if] identifier[len] ( identifier[valid_subscriptions] )== literal[int] :
keyword[return] keyword[False]
keyword[elif] identifier[len] ( identifier[valid_subscriptions] )== literal[int] :
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[TypeError] (
literal[string]
)
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[plan] , identifier[StripeModel] ):
identifier[plan] = identifier[plan] . identifier[id]
keyword[return] identifier[any] (
[
identifier[subscription] . identifier[is_valid] ()
keyword[for] identifier[subscription] keyword[in] identifier[self] . identifier[subscriptions] . identifier[filter] ( identifier[plan__id] = identifier[plan] )
]
) | def has_active_subscription(self, plan=None):
"""
Checks to see if this customer has an active subscription to the given plan.
:param plan: The plan for which to check for an active subscription. If plan is None and
there exists only one active subscription, this method will check if that subscription
is valid. Calling this method with no plan and multiple valid subscriptions for this customer will
throw an exception.
:type plan: Plan or string (plan ID)
:returns: True if there exists an active subscription, False otherwise.
:throws: TypeError if ``plan`` is None and more than one active subscription exists for this customer.
"""
if plan is None:
valid_subscriptions = self._get_valid_subscriptions()
if len(valid_subscriptions) == 0:
return False # depends on [control=['if'], data=[]]
elif len(valid_subscriptions) == 1:
return True # depends on [control=['if'], data=[]]
else:
raise TypeError('plan cannot be None if more than one valid subscription exists for this customer.') # depends on [control=['if'], data=[]]
else: # Convert Plan to id
if isinstance(plan, StripeModel):
plan = plan.id # depends on [control=['if'], data=[]]
return any([subscription.is_valid() for subscription in self.subscriptions.filter(plan__id=plan)]) |
def sensor_names(self):
    """Return standard sensor or instrument name for the file's data.

    The ``instrument_name`` attribute may hold several comma-separated
    names; a single name is returned as a plain string, several as a list.
    """
    raw = self['/attr/instrument_name']
    if isinstance(raw, np.ndarray):
        # Attribute stored as a numpy (possibly 0-d) string array.
        raw = str(raw.astype(str))
    names = [part.strip() for part in raw.split(',')]
    return names[0] if len(names) == 1 else names
constant[Return standard sensor or instrument name for the file's data.
]
variable[res] assign[=] call[name[self]][constant[/attr/instrument_name]]
if call[name[isinstance], parameter[name[res], name[np].ndarray]] begin[:]
variable[res] assign[=] call[name[str], parameter[call[name[res].astype, parameter[name[str]]]]]
variable[res] assign[=] <ast.ListComp object at 0x7da1b1d6eec0>
if compare[call[name[len], parameter[name[res]]] equal[==] constant[1]] begin[:]
return[call[name[res]][constant[0]]]
return[name[res]] | keyword[def] identifier[sensor_names] ( identifier[self] ):
literal[string]
identifier[res] = identifier[self] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[res] , identifier[np] . identifier[ndarray] ):
identifier[res] = identifier[str] ( identifier[res] . identifier[astype] ( identifier[str] ))
identifier[res] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[res] . identifier[split] ( literal[string] )]
keyword[if] identifier[len] ( identifier[res] )== literal[int] :
keyword[return] identifier[res] [ literal[int] ]
keyword[return] identifier[res] | def sensor_names(self):
"""Return standard sensor or instrument name for the file's data.
"""
res = self['/attr/instrument_name']
if isinstance(res, np.ndarray):
res = str(res.astype(str)) # depends on [control=['if'], data=[]]
res = [x.strip() for x in res.split(',')]
if len(res) == 1:
return res[0] # depends on [control=['if'], data=[]]
return res |
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None | def function[_get_numpy_record_dtype, parameter[self, rec]]:
constant[
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
]
import module[numpy] as alias[np]
variable[cur_dtypes] assign[=] name[rec].dtype
variable[col_names] assign[=] name[cur_dtypes].names
variable[record_type_list] assign[=] list[[]]
variable[has_rec_fix] assign[=] constant[False]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[cur_dtypes]]]]]] begin[:]
variable[curr_type] assign[=] call[name[cur_dtypes]][name[i]]
if compare[name[curr_type] equal[==] call[name[np].dtype, parameter[constant[datetime64[ns]]]]] begin[:]
variable[curr_type] assign[=] constant[datetime64[us]]
variable[has_rec_fix] assign[=] constant[True]
call[name[record_type_list].append, parameter[tuple[[<ast.Call object at 0x7da20c9923b0>, <ast.Name object at 0x7da20c993550>]]]]
return[<ast.IfExp object at 0x7da20c9927a0>] | keyword[def] identifier[_get_numpy_record_dtype] ( identifier[self] , identifier[rec] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
identifier[cur_dtypes] = identifier[rec] . identifier[dtype]
identifier[col_names] = identifier[cur_dtypes] . identifier[names]
identifier[record_type_list] =[]
identifier[has_rec_fix] = keyword[False]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[cur_dtypes] )):
identifier[curr_type] = identifier[cur_dtypes] [ identifier[i] ]
keyword[if] identifier[curr_type] == identifier[np] . identifier[dtype] ( literal[string] ):
identifier[curr_type] = literal[string]
identifier[has_rec_fix] = keyword[True]
identifier[record_type_list] . identifier[append] (( identifier[str] ( identifier[col_names] [ identifier[i] ]), identifier[curr_type] ))
keyword[return] identifier[np] . identifier[dtype] ( identifier[record_type_list] ) keyword[if] identifier[has_rec_fix] keyword[else] keyword[None] | def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True # depends on [control=['if'], data=['curr_type']]
record_type_list.append((str(col_names[i]), curr_type)) # depends on [control=['for'], data=['i']]
return np.dtype(record_type_list) if has_rec_fix else None |
def fetch(self, range_start, range_end):
    """
    Fetches speeches from the ListarDiscursosPlenario endpoint of the
    SessoesReunioes (SessionsReunions) API.
    The date range provided should be specified as a string using the
    format supported by the API (%d/%m/%Y)
    """
    url = self.URL.format(dataIni=range_start, dataFim=range_end)
    response = urllib.request.urlopen(url)
    root = ET.ElementTree(file=response).getroot()
    columns = [
        'session_code',
        'session_date',
        'session_num',
        'phase_code',
        'phase_desc',
        'speech_speaker_num',
        'speech_speaker_name',
        'speech_speaker_party',
        'speech_speaker_state',
        'speech_started_at',
        'speech_room_num',
        'speech_insertion_num',
    ]
    return pd.DataFrame(self._parse_speeches(root), columns=columns)
constant[
Fetches speeches from the ListarDiscursosPlenario endpoint of the
SessoesReunioes (SessionsReunions) API.
The date range provided should be specified as a string using the
format supported by the API (%d/%m/%Y)
]
variable[range_dates] assign[=] dictionary[[<ast.Constant object at 0x7da20c991990>, <ast.Constant object at 0x7da20c9937f0>], [<ast.Name object at 0x7da20c993100>, <ast.Name object at 0x7da20c992f20>]]
variable[url] assign[=] call[name[self].URL.format, parameter[]]
variable[xml] assign[=] call[name[urllib].request.urlopen, parameter[name[url]]]
variable[tree] assign[=] call[name[ET].ElementTree, parameter[]]
variable[records] assign[=] call[name[self]._parse_speeches, parameter[call[name[tree].getroot, parameter[]]]]
return[call[name[pd].DataFrame, parameter[name[records]]]] | keyword[def] identifier[fetch] ( identifier[self] , identifier[range_start] , identifier[range_end] ):
literal[string]
identifier[range_dates] ={ literal[string] : identifier[range_start] , literal[string] : identifier[range_end] }
identifier[url] = identifier[self] . identifier[URL] . identifier[format] (** identifier[range_dates] )
identifier[xml] = identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[url] )
identifier[tree] = identifier[ET] . identifier[ElementTree] ( identifier[file] = identifier[xml] )
identifier[records] = identifier[self] . identifier[_parse_speeches] ( identifier[tree] . identifier[getroot] ())
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[records] , identifier[columns] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string]
]) | def fetch(self, range_start, range_end):
"""
Fetches speeches from the ListarDiscursosPlenario endpoint of the
SessoesReunioes (SessionsReunions) API.
The date range provided should be specified as a string using the
format supported by the API (%d/%m/%Y)
"""
range_dates = {'dataIni': range_start, 'dataFim': range_end}
url = self.URL.format(**range_dates)
xml = urllib.request.urlopen(url)
tree = ET.ElementTree(file=xml)
records = self._parse_speeches(tree.getroot())
return pd.DataFrame(records, columns=['session_code', 'session_date', 'session_num', 'phase_code', 'phase_desc', 'speech_speaker_num', 'speech_speaker_name', 'speech_speaker_party', 'speech_speaker_state', 'speech_started_at', 'speech_room_num', 'speech_insertion_num']) |
def validate(self, value):
    """Validate reference.
    Returns:
        A valid value.
    Raises:
        BadValueError for the following reasons:
        - Value is not saved.
        - Object not of correct model type for reference.
    """
    if value == '':
        # Empty form input: map to None when the field is nullable,
        # otherwise to 0 (an "unset" key).
        if self.kwargs.get('nullable', __nullable__):
            value = None
        else:
            value = 0
    if not isinstance(value, Model):
        # Plain keys (ints, None, ...) go through the generic validation.
        return super(ReferenceProperty, self).validate(value)
    if not value.is_saved():
        # BUGFIX: ``reference_class`` is itself a class, so
        # ``__class__.__name__`` named the metaclass; use ``__name__``.
        raise BadValueError(
            '%s instance must be saved before it can be stored as a '
            'reference' % self.reference_class.__name__)
    if not isinstance(value, self.reference_class):
        raise KindError('Property %s must be an instance of %s' %
                        (self.name, self.reference_class.__name__))
    return value
constant[Validate reference.
Returns:
A valid value.
Raises:
BadValueError for the following reasons:
- Value is not saved.
- Object not of correct model type for reference.
]
if compare[name[value] equal[==] constant[]] begin[:]
if call[name[self].kwargs.get, parameter[constant[nullable], name[__nullable__]]] begin[:]
variable[value] assign[=] constant[None]
if <ast.UnaryOp object at 0x7da2044c1b40> begin[:]
return[call[call[name[super], parameter[name[ReferenceProperty], name[self]]].validate, parameter[name[value]]]]
if <ast.UnaryOp object at 0x7da2044c37c0> begin[:]
<ast.Raise object at 0x7da2044c3d60>
if <ast.UnaryOp object at 0x7da2044c04c0> begin[:]
<ast.Raise object at 0x7da2044c27d0>
return[name[value]] | keyword[def] identifier[validate] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] == literal[string] :
keyword[if] identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] , identifier[__nullable__] ):
identifier[value] = keyword[None]
keyword[else] :
identifier[value] = literal[int]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[Model] ):
keyword[return] identifier[super] ( identifier[ReferenceProperty] , identifier[self] ). identifier[validate] ( identifier[value] )
keyword[if] keyword[not] identifier[value] . identifier[is_saved] ():
keyword[raise] identifier[BadValueError] (
literal[string]
literal[string] % identifier[self] . identifier[reference_class] . identifier[__class__] . identifier[__name__] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[self] . identifier[reference_class] ):
keyword[raise] identifier[KindError] ( literal[string] %
( identifier[self] . identifier[name] , identifier[self] . identifier[reference_class] . identifier[__class__] . identifier[__name__] ))
keyword[return] identifier[value] | def validate(self, value):
"""Validate reference.
Returns:
A valid value.
Raises:
BadValueError for the following reasons:
- Value is not saved.
- Object not of correct model type for reference.
"""
if value == '':
if self.kwargs.get('nullable', __nullable__):
value = None # depends on [control=['if'], data=[]]
else:
value = 0 # depends on [control=['if'], data=['value']]
if not isinstance(value, Model):
return super(ReferenceProperty, self).validate(value) # depends on [control=['if'], data=[]]
if not value.is_saved():
raise BadValueError('%s instance must be saved before it can be stored as a reference' % self.reference_class.__class__.__name__) # depends on [control=['if'], data=[]]
if not isinstance(value, self.reference_class):
raise KindError('Property %s must be an instance of %s' % (self.name, self.reference_class.__class__.__name__)) # depends on [control=['if'], data=[]]
return value |
def nv_tuple_list_replace(l, v):
    """Replace, in place, every tuple in *l* whose first element matches
    ``v[0]`` with *v*; append *v* when no such tuple exists.
    """
    key = v[0]
    matched = False
    for idx, item in enumerate(l):
        if item[0] == key:
            l[idx] = v
            matched = True
    if not matched:
        l.append(v)
l.append(v) | def function[nv_tuple_list_replace, parameter[l, v]]:
constant[ replace a tuple in a tuple list
]
variable[_found] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b11122f0>, <ast.Name object at 0x7da1b1111db0>]]] in starred[call[name[enumerate], parameter[name[l]]]] begin[:]
if compare[call[name[x]][constant[0]] equal[==] call[name[v]][constant[0]]] begin[:]
call[name[l]][name[i]] assign[=] name[v]
variable[_found] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da1b1113250> begin[:]
call[name[l].append, parameter[name[v]]] | keyword[def] identifier[nv_tuple_list_replace] ( identifier[l] , identifier[v] ):
literal[string]
identifier[_found] = keyword[False]
keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[l] ):
keyword[if] identifier[x] [ literal[int] ]== identifier[v] [ literal[int] ]:
identifier[l] [ identifier[i] ]= identifier[v]
identifier[_found] = keyword[True]
keyword[if] keyword[not] identifier[_found] :
identifier[l] . identifier[append] ( identifier[v] ) | def nv_tuple_list_replace(l, v):
""" replace a tuple in a tuple list
"""
_found = False
for (i, x) in enumerate(l):
if x[0] == v[0]:
l[i] = v
_found = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not _found:
l.append(v) # depends on [control=['if'], data=[]] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.