code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get_dcap(self, cycle=None, dataset_number=None):
"""Returns discharge_capacity (in mAh/g), and voltage."""
# TODO: should return a DataFrame as default
# but remark that we then have to update e.g. batch_helpers.py
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
dc, v = self._get_cap(cycle, dataset_number, "discharge")
return dc, v | def function[get_dcap, parameter[self, cycle, dataset_number]]:
constant[Returns discharge_capacity (in mAh/g), and voltage.]
variable[dataset_number] assign[=] call[name[self]._validate_dataset_number, parameter[name[dataset_number]]]
if compare[name[dataset_number] is constant[None]] begin[:]
call[name[self]._report_empty_dataset, parameter[]]
return[None]
<ast.Tuple object at 0x7da1b1969270> assign[=] call[name[self]._get_cap, parameter[name[cycle], name[dataset_number], constant[discharge]]]
return[tuple[[<ast.Name object at 0x7da1b1968790>, <ast.Name object at 0x7da1b1968910>]]] | keyword[def] identifier[get_dcap] ( identifier[self] , identifier[cycle] = keyword[None] , identifier[dataset_number] = keyword[None] ):
literal[string]
identifier[dataset_number] = identifier[self] . identifier[_validate_dataset_number] ( identifier[dataset_number] )
keyword[if] identifier[dataset_number] keyword[is] keyword[None] :
identifier[self] . identifier[_report_empty_dataset] ()
keyword[return]
identifier[dc] , identifier[v] = identifier[self] . identifier[_get_cap] ( identifier[cycle] , identifier[dataset_number] , literal[string] )
keyword[return] identifier[dc] , identifier[v] | def get_dcap(self, cycle=None, dataset_number=None):
"""Returns discharge_capacity (in mAh/g), and voltage."""
# TODO: should return a DataFrame as default
# but remark that we then have to update e.g. batch_helpers.py
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return # depends on [control=['if'], data=[]]
(dc, v) = self._get_cap(cycle, dataset_number, 'discharge')
return (dc, v) |
def close(self, close_socket=False):
"""Close the open socket for this manager
:return:
"""
self._bm.stop_socket(self._conn_key)
if close_socket:
self._bm.close()
time.sleep(1)
self._depth_cache = None | def function[close, parameter[self, close_socket]]:
constant[Close the open socket for this manager
:return:
]
call[name[self]._bm.stop_socket, parameter[name[self]._conn_key]]
if name[close_socket] begin[:]
call[name[self]._bm.close, parameter[]]
call[name[time].sleep, parameter[constant[1]]]
name[self]._depth_cache assign[=] constant[None] | keyword[def] identifier[close] ( identifier[self] , identifier[close_socket] = keyword[False] ):
literal[string]
identifier[self] . identifier[_bm] . identifier[stop_socket] ( identifier[self] . identifier[_conn_key] )
keyword[if] identifier[close_socket] :
identifier[self] . identifier[_bm] . identifier[close] ()
identifier[time] . identifier[sleep] ( literal[int] )
identifier[self] . identifier[_depth_cache] = keyword[None] | def close(self, close_socket=False):
"""Close the open socket for this manager
:return:
"""
self._bm.stop_socket(self._conn_key)
if close_socket:
self._bm.close() # depends on [control=['if'], data=[]]
time.sleep(1)
self._depth_cache = None |
def from_entity(cls, entity: Entity) -> 'DictModel':
"""Convert the entity to a dictionary record """
dict_obj = {}
for field_name in entity.meta_.attributes:
dict_obj[field_name] = getattr(entity, field_name)
return dict_obj | def function[from_entity, parameter[cls, entity]]:
constant[Convert the entity to a dictionary record ]
variable[dict_obj] assign[=] dictionary[[], []]
for taget[name[field_name]] in starred[name[entity].meta_.attributes] begin[:]
call[name[dict_obj]][name[field_name]] assign[=] call[name[getattr], parameter[name[entity], name[field_name]]]
return[name[dict_obj]] | keyword[def] identifier[from_entity] ( identifier[cls] , identifier[entity] : identifier[Entity] )-> literal[string] :
literal[string]
identifier[dict_obj] ={}
keyword[for] identifier[field_name] keyword[in] identifier[entity] . identifier[meta_] . identifier[attributes] :
identifier[dict_obj] [ identifier[field_name] ]= identifier[getattr] ( identifier[entity] , identifier[field_name] )
keyword[return] identifier[dict_obj] | def from_entity(cls, entity: Entity) -> 'DictModel':
"""Convert the entity to a dictionary record """
dict_obj = {}
for field_name in entity.meta_.attributes:
dict_obj[field_name] = getattr(entity, field_name) # depends on [control=['for'], data=['field_name']]
return dict_obj |
def close(self):
"""Close and delete instance."""
# remove callbacks
Datastore.stores[self.domain].remove(self)
# delete data after the last instance is gone
if self.release_storage and not Datastore.stores[self.domain]:
del Datastore.global_data[self.domain]
del self | def function[close, parameter[self]]:
constant[Close and delete instance.]
call[call[name[Datastore].stores][name[self].domain].remove, parameter[name[self]]]
if <ast.BoolOp object at 0x7da18f00f730> begin[:]
<ast.Delete object at 0x7da18f00dd80>
<ast.Delete object at 0x7da18f00f970> | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[Datastore] . identifier[stores] [ identifier[self] . identifier[domain] ]. identifier[remove] ( identifier[self] )
keyword[if] identifier[self] . identifier[release_storage] keyword[and] keyword[not] identifier[Datastore] . identifier[stores] [ identifier[self] . identifier[domain] ]:
keyword[del] identifier[Datastore] . identifier[global_data] [ identifier[self] . identifier[domain] ]
keyword[del] identifier[self] | def close(self):
"""Close and delete instance."""
# remove callbacks
Datastore.stores[self.domain].remove(self)
# delete data after the last instance is gone
if self.release_storage and (not Datastore.stores[self.domain]):
del Datastore.global_data[self.domain] # depends on [control=['if'], data=[]]
del self |
def build(package, path=None, dry_run=False, env='default', force=False, build_file=False):
"""
Compile a Quilt data package, either from a build file or an existing package node.
:param package: short package specifier, i.e. 'team:user/pkg'
:param path: file path, git url, or existing package node
"""
# TODO: rename 'path' param to 'target'?
team, _, _, subpath = parse_package(package, allow_subpath=True)
_check_team_id(team)
logged_in_team = _find_logged_in_team()
if logged_in_team is not None and team is None and force is False:
answer = input("You're logged in as a team member, but you aren't specifying "
"a team for the package you're currently building. Maybe you meant:\n"
"quilt build {team}:{package}\n"
"Are you sure you want to continue? (y/N) ".format(
team=logged_in_team, package=package))
if answer.lower() != 'y':
return
# Backward compatibility: if there's no subpath, we're building a top-level package,
# so treat `path` as a build file, not as a data node.
if not subpath:
build_file = True
package_hash = hashlib.md5(package.encode('utf-8')).hexdigest()
try:
_build_internal(package, path, dry_run, env, build_file)
except Exception as ex:
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env, error=str(ex))
raise
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env) | def function[build, parameter[package, path, dry_run, env, force, build_file]]:
constant[
Compile a Quilt data package, either from a build file or an existing package node.
:param package: short package specifier, i.e. 'team:user/pkg'
:param path: file path, git url, or existing package node
]
<ast.Tuple object at 0x7da1b1234790> assign[=] call[name[parse_package], parameter[name[package]]]
call[name[_check_team_id], parameter[name[team]]]
variable[logged_in_team] assign[=] call[name[_find_logged_in_team], parameter[]]
if <ast.BoolOp object at 0x7da1b12347f0> begin[:]
variable[answer] assign[=] call[name[input], parameter[call[constant[You're logged in as a team member, but you aren't specifying a team for the package you're currently building. Maybe you meant:
quilt build {team}:{package}
Are you sure you want to continue? (y/N) ].format, parameter[]]]]
if compare[call[name[answer].lower, parameter[]] not_equal[!=] constant[y]] begin[:]
return[None]
if <ast.UnaryOp object at 0x7da1b12374f0> begin[:]
variable[build_file] assign[=] constant[True]
variable[package_hash] assign[=] call[call[name[hashlib].md5, parameter[call[name[package].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]
<ast.Try object at 0x7da1b1235a80>
call[name[_log], parameter[name[team]]] | keyword[def] identifier[build] ( identifier[package] , identifier[path] = keyword[None] , identifier[dry_run] = keyword[False] , identifier[env] = literal[string] , identifier[force] = keyword[False] , identifier[build_file] = keyword[False] ):
literal[string]
identifier[team] , identifier[_] , identifier[_] , identifier[subpath] = identifier[parse_package] ( identifier[package] , identifier[allow_subpath] = keyword[True] )
identifier[_check_team_id] ( identifier[team] )
identifier[logged_in_team] = identifier[_find_logged_in_team] ()
keyword[if] identifier[logged_in_team] keyword[is] keyword[not] keyword[None] keyword[and] identifier[team] keyword[is] keyword[None] keyword[and] identifier[force] keyword[is] keyword[False] :
identifier[answer] = identifier[input] ( literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[team] = identifier[logged_in_team] , identifier[package] = identifier[package] ))
keyword[if] identifier[answer] . identifier[lower] ()!= literal[string] :
keyword[return]
keyword[if] keyword[not] identifier[subpath] :
identifier[build_file] = keyword[True]
identifier[package_hash] = identifier[hashlib] . identifier[md5] ( identifier[package] . identifier[encode] ( literal[string] )). identifier[hexdigest] ()
keyword[try] :
identifier[_build_internal] ( identifier[package] , identifier[path] , identifier[dry_run] , identifier[env] , identifier[build_file] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[_log] ( identifier[team] , identifier[type] = literal[string] , identifier[package] = identifier[package_hash] , identifier[dry_run] = identifier[dry_run] , identifier[env] = identifier[env] , identifier[error] = identifier[str] ( identifier[ex] ))
keyword[raise]
identifier[_log] ( identifier[team] , identifier[type] = literal[string] , identifier[package] = identifier[package_hash] , identifier[dry_run] = identifier[dry_run] , identifier[env] = identifier[env] ) | def build(package, path=None, dry_run=False, env='default', force=False, build_file=False):
"""
Compile a Quilt data package, either from a build file or an existing package node.
:param package: short package specifier, i.e. 'team:user/pkg'
:param path: file path, git url, or existing package node
"""
# TODO: rename 'path' param to 'target'?
(team, _, _, subpath) = parse_package(package, allow_subpath=True)
_check_team_id(team)
logged_in_team = _find_logged_in_team()
if logged_in_team is not None and team is None and (force is False):
answer = input("You're logged in as a team member, but you aren't specifying a team for the package you're currently building. Maybe you meant:\nquilt build {team}:{package}\nAre you sure you want to continue? (y/N) ".format(team=logged_in_team, package=package))
if answer.lower() != 'y':
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Backward compatibility: if there's no subpath, we're building a top-level package,
# so treat `path` as a build file, not as a data node.
if not subpath:
build_file = True # depends on [control=['if'], data=[]]
package_hash = hashlib.md5(package.encode('utf-8')).hexdigest()
try:
_build_internal(package, path, dry_run, env, build_file) # depends on [control=['try'], data=[]]
except Exception as ex:
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env, error=str(ex))
raise # depends on [control=['except'], data=['ex']]
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env) |
def directions(self, features, profile='mapbox/driving',
alternatives=None, geometries=None, overview=None, steps=None,
continue_straight=None, waypoint_snapping=None, annotations=None,
language=None, **kwargs):
"""Request directions for waypoints encoded as GeoJSON features.
Parameters
----------
features : iterable
An collection of GeoJSON features
profile : str
Name of a Mapbox profile such as 'mapbox.driving'
alternatives : bool
Whether to try to return alternative routes, default: False
geometries : string
Type of geometry returned (geojson, polyline, polyline6)
overview : string or False
Type of returned overview geometry: 'full', 'simplified',
or False
steps : bool
Whether to return steps and turn-by-turn instructions,
default: False
continue_straight : bool
Direction of travel when departing intermediate waypoints
radiuses : iterable of numbers or 'unlimited'
Must be same length as features
waypoint_snapping : list
Controls snapping of waypoints
The list is zipped with the features collection and must
have the same length. Elements of the list must be one of:
- A number (interpretted as a snapping radius)
- The string 'unlimited' (unlimited snapping radius)
- A 3-element tuple consisting of (radius, angle, range)
- None (no snapping parameters specified for that waypoint)
annotations : str
Whether or not to return additional metadata along the route
Possible values are: 'duration', 'distance', 'speed', and
'congestion'. Several annotations can be used by joining
them with ','.
language : str
Language of returned turn-by-turn text instructions,
default: 'en'
Returns
-------
requests.Response
The response object has a geojson() method for access to
the route(s) as a GeoJSON-like FeatureCollection
dictionary.
"""
# backwards compatible, deprecated
if 'geometry' in kwargs and geometries is None:
geometries = kwargs['geometry']
warnings.warn('Use `geometries` instead of `geometry`',
errors.MapboxDeprecationWarning)
annotations = self._validate_annotations(annotations)
coordinates = encode_coordinates(
features, precision=6, min_limit=2, max_limit=25)
geometries = self._validate_geom_encoding(geometries)
overview = self._validate_geom_overview(overview)
profile = self._validate_profile(profile)
bearings, radii = self._validate_snapping(waypoint_snapping, features)
params = {}
if alternatives is not None:
params.update(
{'alternatives': 'true' if alternatives is True else 'false'})
if geometries is not None:
params.update({'geometries': geometries})
if overview is not None:
params.update(
{'overview': 'false' if overview is False else overview})
if steps is not None:
params.update(
{'steps': 'true' if steps is True else 'false'})
if continue_straight is not None:
params.update(
{'continue_straight': 'true' if steps is True else 'false'})
if annotations is not None:
params.update({'annotations': ','.join(annotations)})
if language is not None:
params.update({'language': language})
if radii is not None:
params.update(
{'radiuses': ';'.join(str(r) for r in radii)})
if bearings is not None:
params.update(
{'bearings': ';'.join(self._encode_bearing(b) for b in bearings)})
profile_ns, profile_name = profile.split('/')
uri = URITemplate(
self.baseuri + '/{profile_ns}/{profile_name}/{coordinates}.json').expand(
profile_ns=profile_ns, profile_name=profile_name, coordinates=coordinates)
resp = self.session.get(uri, params=params)
self.handle_http_error(resp)
def geojson():
return self._geojson(resp.json(), geom_format=geometries)
resp.geojson = geojson
return resp | def function[directions, parameter[self, features, profile, alternatives, geometries, overview, steps, continue_straight, waypoint_snapping, annotations, language]]:
constant[Request directions for waypoints encoded as GeoJSON features.
Parameters
----------
features : iterable
An collection of GeoJSON features
profile : str
Name of a Mapbox profile such as 'mapbox.driving'
alternatives : bool
Whether to try to return alternative routes, default: False
geometries : string
Type of geometry returned (geojson, polyline, polyline6)
overview : string or False
Type of returned overview geometry: 'full', 'simplified',
or False
steps : bool
Whether to return steps and turn-by-turn instructions,
default: False
continue_straight : bool
Direction of travel when departing intermediate waypoints
radiuses : iterable of numbers or 'unlimited'
Must be same length as features
waypoint_snapping : list
Controls snapping of waypoints
The list is zipped with the features collection and must
have the same length. Elements of the list must be one of:
- A number (interpretted as a snapping radius)
- The string 'unlimited' (unlimited snapping radius)
- A 3-element tuple consisting of (radius, angle, range)
- None (no snapping parameters specified for that waypoint)
annotations : str
Whether or not to return additional metadata along the route
Possible values are: 'duration', 'distance', 'speed', and
'congestion'. Several annotations can be used by joining
them with ','.
language : str
Language of returned turn-by-turn text instructions,
default: 'en'
Returns
-------
requests.Response
The response object has a geojson() method for access to
the route(s) as a GeoJSON-like FeatureCollection
dictionary.
]
if <ast.BoolOp object at 0x7da1b1783bb0> begin[:]
variable[geometries] assign[=] call[name[kwargs]][constant[geometry]]
call[name[warnings].warn, parameter[constant[Use `geometries` instead of `geometry`], name[errors].MapboxDeprecationWarning]]
variable[annotations] assign[=] call[name[self]._validate_annotations, parameter[name[annotations]]]
variable[coordinates] assign[=] call[name[encode_coordinates], parameter[name[features]]]
variable[geometries] assign[=] call[name[self]._validate_geom_encoding, parameter[name[geometries]]]
variable[overview] assign[=] call[name[self]._validate_geom_overview, parameter[name[overview]]]
variable[profile] assign[=] call[name[self]._validate_profile, parameter[name[profile]]]
<ast.Tuple object at 0x7da1b17dbdc0> assign[=] call[name[self]._validate_snapping, parameter[name[waypoint_snapping], name[features]]]
variable[params] assign[=] dictionary[[], []]
if compare[name[alternatives] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b17d88e0>], [<ast.IfExp object at 0x7da1b17d8880>]]]]
if compare[name[geometries] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b17db970>], [<ast.Name object at 0x7da1b17d9720>]]]]
if compare[name[overview] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b17da6e0>], [<ast.IfExp object at 0x7da1b17d9b40>]]]]
if compare[name[steps] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b17da230>], [<ast.IfExp object at 0x7da1b17d9a50>]]]]
if compare[name[continue_straight] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b1735210>], [<ast.IfExp object at 0x7da1b1735bd0>]]]]
if compare[name[annotations] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b17370a0>], [<ast.Call object at 0x7da1b17366e0>]]]]
if compare[name[language] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b17373d0>], [<ast.Name object at 0x7da1b1734d60>]]]]
if compare[name[radii] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b17356f0>], [<ast.Call object at 0x7da1b1734820>]]]]
if compare[name[bearings] is_not constant[None]] begin[:]
call[name[params].update, parameter[dictionary[[<ast.Constant object at 0x7da1b18975e0>], [<ast.Call object at 0x7da1b1896440>]]]]
<ast.Tuple object at 0x7da1b17e3a60> assign[=] call[name[profile].split, parameter[constant[/]]]
variable[uri] assign[=] call[call[name[URITemplate], parameter[binary_operation[name[self].baseuri + constant[/{profile_ns}/{profile_name}/{coordinates}.json]]]].expand, parameter[]]
variable[resp] assign[=] call[name[self].session.get, parameter[name[uri]]]
call[name[self].handle_http_error, parameter[name[resp]]]
def function[geojson, parameter[]]:
return[call[name[self]._geojson, parameter[call[name[resp].json, parameter[]]]]]
name[resp].geojson assign[=] name[geojson]
return[name[resp]] | keyword[def] identifier[directions] ( identifier[self] , identifier[features] , identifier[profile] = literal[string] ,
identifier[alternatives] = keyword[None] , identifier[geometries] = keyword[None] , identifier[overview] = keyword[None] , identifier[steps] = keyword[None] ,
identifier[continue_straight] = keyword[None] , identifier[waypoint_snapping] = keyword[None] , identifier[annotations] = keyword[None] ,
identifier[language] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[geometries] keyword[is] keyword[None] :
identifier[geometries] = identifier[kwargs] [ literal[string] ]
identifier[warnings] . identifier[warn] ( literal[string] ,
identifier[errors] . identifier[MapboxDeprecationWarning] )
identifier[annotations] = identifier[self] . identifier[_validate_annotations] ( identifier[annotations] )
identifier[coordinates] = identifier[encode_coordinates] (
identifier[features] , identifier[precision] = literal[int] , identifier[min_limit] = literal[int] , identifier[max_limit] = literal[int] )
identifier[geometries] = identifier[self] . identifier[_validate_geom_encoding] ( identifier[geometries] )
identifier[overview] = identifier[self] . identifier[_validate_geom_overview] ( identifier[overview] )
identifier[profile] = identifier[self] . identifier[_validate_profile] ( identifier[profile] )
identifier[bearings] , identifier[radii] = identifier[self] . identifier[_validate_snapping] ( identifier[waypoint_snapping] , identifier[features] )
identifier[params] ={}
keyword[if] identifier[alternatives] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] (
{ literal[string] : literal[string] keyword[if] identifier[alternatives] keyword[is] keyword[True] keyword[else] literal[string] })
keyword[if] identifier[geometries] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] ({ literal[string] : identifier[geometries] })
keyword[if] identifier[overview] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] (
{ literal[string] : literal[string] keyword[if] identifier[overview] keyword[is] keyword[False] keyword[else] identifier[overview] })
keyword[if] identifier[steps] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] (
{ literal[string] : literal[string] keyword[if] identifier[steps] keyword[is] keyword[True] keyword[else] literal[string] })
keyword[if] identifier[continue_straight] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] (
{ literal[string] : literal[string] keyword[if] identifier[steps] keyword[is] keyword[True] keyword[else] literal[string] })
keyword[if] identifier[annotations] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] ({ literal[string] : literal[string] . identifier[join] ( identifier[annotations] )})
keyword[if] identifier[language] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] ({ literal[string] : identifier[language] })
keyword[if] identifier[radii] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] (
{ literal[string] : literal[string] . identifier[join] ( identifier[str] ( identifier[r] ) keyword[for] identifier[r] keyword[in] identifier[radii] )})
keyword[if] identifier[bearings] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] (
{ literal[string] : literal[string] . identifier[join] ( identifier[self] . identifier[_encode_bearing] ( identifier[b] ) keyword[for] identifier[b] keyword[in] identifier[bearings] )})
identifier[profile_ns] , identifier[profile_name] = identifier[profile] . identifier[split] ( literal[string] )
identifier[uri] = identifier[URITemplate] (
identifier[self] . identifier[baseuri] + literal[string] ). identifier[expand] (
identifier[profile_ns] = identifier[profile_ns] , identifier[profile_name] = identifier[profile_name] , identifier[coordinates] = identifier[coordinates] )
identifier[resp] = identifier[self] . identifier[session] . identifier[get] ( identifier[uri] , identifier[params] = identifier[params] )
identifier[self] . identifier[handle_http_error] ( identifier[resp] )
keyword[def] identifier[geojson] ():
keyword[return] identifier[self] . identifier[_geojson] ( identifier[resp] . identifier[json] (), identifier[geom_format] = identifier[geometries] )
identifier[resp] . identifier[geojson] = identifier[geojson]
keyword[return] identifier[resp] | def directions(self, features, profile='mapbox/driving', alternatives=None, geometries=None, overview=None, steps=None, continue_straight=None, waypoint_snapping=None, annotations=None, language=None, **kwargs):
"""Request directions for waypoints encoded as GeoJSON features.
Parameters
----------
features : iterable
An collection of GeoJSON features
profile : str
Name of a Mapbox profile such as 'mapbox.driving'
alternatives : bool
Whether to try to return alternative routes, default: False
geometries : string
Type of geometry returned (geojson, polyline, polyline6)
overview : string or False
Type of returned overview geometry: 'full', 'simplified',
or False
steps : bool
Whether to return steps and turn-by-turn instructions,
default: False
continue_straight : bool
Direction of travel when departing intermediate waypoints
radiuses : iterable of numbers or 'unlimited'
Must be same length as features
waypoint_snapping : list
Controls snapping of waypoints
The list is zipped with the features collection and must
have the same length. Elements of the list must be one of:
- A number (interpretted as a snapping radius)
- The string 'unlimited' (unlimited snapping radius)
- A 3-element tuple consisting of (radius, angle, range)
- None (no snapping parameters specified for that waypoint)
annotations : str
Whether or not to return additional metadata along the route
Possible values are: 'duration', 'distance', 'speed', and
'congestion'. Several annotations can be used by joining
them with ','.
language : str
Language of returned turn-by-turn text instructions,
default: 'en'
Returns
-------
requests.Response
The response object has a geojson() method for access to
the route(s) as a GeoJSON-like FeatureCollection
dictionary.
"""
# backwards compatible, deprecated
if 'geometry' in kwargs and geometries is None:
geometries = kwargs['geometry']
warnings.warn('Use `geometries` instead of `geometry`', errors.MapboxDeprecationWarning) # depends on [control=['if'], data=[]]
annotations = self._validate_annotations(annotations)
coordinates = encode_coordinates(features, precision=6, min_limit=2, max_limit=25)
geometries = self._validate_geom_encoding(geometries)
overview = self._validate_geom_overview(overview)
profile = self._validate_profile(profile)
(bearings, radii) = self._validate_snapping(waypoint_snapping, features)
params = {}
if alternatives is not None:
params.update({'alternatives': 'true' if alternatives is True else 'false'}) # depends on [control=['if'], data=['alternatives']]
if geometries is not None:
params.update({'geometries': geometries}) # depends on [control=['if'], data=['geometries']]
if overview is not None:
params.update({'overview': 'false' if overview is False else overview}) # depends on [control=['if'], data=['overview']]
if steps is not None:
params.update({'steps': 'true' if steps is True else 'false'}) # depends on [control=['if'], data=['steps']]
if continue_straight is not None:
params.update({'continue_straight': 'true' if steps is True else 'false'}) # depends on [control=['if'], data=[]]
if annotations is not None:
params.update({'annotations': ','.join(annotations)}) # depends on [control=['if'], data=['annotations']]
if language is not None:
params.update({'language': language}) # depends on [control=['if'], data=['language']]
if radii is not None:
params.update({'radiuses': ';'.join((str(r) for r in radii))}) # depends on [control=['if'], data=['radii']]
if bearings is not None:
params.update({'bearings': ';'.join((self._encode_bearing(b) for b in bearings))}) # depends on [control=['if'], data=['bearings']]
(profile_ns, profile_name) = profile.split('/')
uri = URITemplate(self.baseuri + '/{profile_ns}/{profile_name}/{coordinates}.json').expand(profile_ns=profile_ns, profile_name=profile_name, coordinates=coordinates)
resp = self.session.get(uri, params=params)
self.handle_http_error(resp)
def geojson():
return self._geojson(resp.json(), geom_format=geometries)
resp.geojson = geojson
return resp |
def lookup(self, iterable, gather=False):
"""Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrive nodes from tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init
"""
for result in self.root.lookup(iterable,
gather=gather,
edit_distance=0,
max_edit_distance=self.max_edit_distance,
match_threshold=self.match_threshold):
yield result | def function[lookup, parameter[self, iterable, gather]]:
constant[Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrive nodes from tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init
]
for taget[name[result]] in starred[call[name[self].root.lookup, parameter[name[iterable]]]] begin[:]
<ast.Yield object at 0x7da1b0881240> | keyword[def] identifier[lookup] ( identifier[self] , identifier[iterable] , identifier[gather] = keyword[False] ):
literal[string]
keyword[for] identifier[result] keyword[in] identifier[self] . identifier[root] . identifier[lookup] ( identifier[iterable] ,
identifier[gather] = identifier[gather] ,
identifier[edit_distance] = literal[int] ,
identifier[max_edit_distance] = identifier[self] . identifier[max_edit_distance] ,
identifier[match_threshold] = identifier[self] . identifier[match_threshold] ):
keyword[yield] identifier[result] | def lookup(self, iterable, gather=False):
"""Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrive nodes from tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init
"""
for result in self.root.lookup(iterable, gather=gather, edit_distance=0, max_edit_distance=self.max_edit_distance, match_threshold=self.match_threshold):
yield result # depends on [control=['for'], data=['result']] |
def _write_csv(filepath, data, kwargs):
"""See documentation of mpu.io.write."""
kwargs_open = {'newline': ''}
mode = 'w'
if sys.version_info < (3, 0):
kwargs_open.pop('newline', None)
mode = 'wb'
with open(filepath, mode, **kwargs_open) as fp:
if 'delimiter' not in kwargs:
kwargs['delimiter'] = ','
if 'quotechar' not in kwargs:
kwargs['quotechar'] = '"'
with open(filepath, 'w') as fp:
writer = csv.writer(fp, **kwargs)
writer.writerows(data)
return data | def function[_write_csv, parameter[filepath, data, kwargs]]:
constant[See documentation of mpu.io.write.]
variable[kwargs_open] assign[=] dictionary[[<ast.Constant object at 0x7da1b1a5cbb0>], [<ast.Constant object at 0x7da1b1a5e080>]]
variable[mode] assign[=] constant[w]
if compare[name[sys].version_info less[<] tuple[[<ast.Constant object at 0x7da1b1a5fcd0>, <ast.Constant object at 0x7da1b1a5e1a0>]]] begin[:]
call[name[kwargs_open].pop, parameter[constant[newline], constant[None]]]
variable[mode] assign[=] constant[wb]
with call[name[open], parameter[name[filepath], name[mode]]] begin[:]
if compare[constant[delimiter] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[delimiter]] assign[=] constant[,]
if compare[constant[quotechar] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[quotechar]] assign[=] constant["]
with call[name[open], parameter[name[filepath], constant[w]]] begin[:]
variable[writer] assign[=] call[name[csv].writer, parameter[name[fp]]]
call[name[writer].writerows, parameter[name[data]]]
return[name[data]] | keyword[def] identifier[_write_csv] ( identifier[filepath] , identifier[data] , identifier[kwargs] ):
literal[string]
identifier[kwargs_open] ={ literal[string] : literal[string] }
identifier[mode] = literal[string]
keyword[if] identifier[sys] . identifier[version_info] <( literal[int] , literal[int] ):
identifier[kwargs_open] . identifier[pop] ( literal[string] , keyword[None] )
identifier[mode] = literal[string]
keyword[with] identifier[open] ( identifier[filepath] , identifier[mode] ,** identifier[kwargs_open] ) keyword[as] identifier[fp] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[string]
keyword[with] identifier[open] ( identifier[filepath] , literal[string] ) keyword[as] identifier[fp] :
identifier[writer] = identifier[csv] . identifier[writer] ( identifier[fp] ,** identifier[kwargs] )
identifier[writer] . identifier[writerows] ( identifier[data] )
keyword[return] identifier[data] | def _write_csv(filepath, data, kwargs):
"""See documentation of mpu.io.write."""
kwargs_open = {'newline': ''}
mode = 'w'
if sys.version_info < (3, 0):
kwargs_open.pop('newline', None)
mode = 'wb' # depends on [control=['if'], data=[]]
with open(filepath, mode, **kwargs_open) as fp:
if 'delimiter' not in kwargs:
kwargs['delimiter'] = ',' # depends on [control=['if'], data=['kwargs']]
if 'quotechar' not in kwargs:
kwargs['quotechar'] = '"' # depends on [control=['if'], data=['kwargs']]
with open(filepath, 'w') as fp:
writer = csv.writer(fp, **kwargs)
writer.writerows(data) # depends on [control=['with'], data=['fp']] # depends on [control=['with'], data=['open', 'fp']]
return data |
def make_score_tabular(
        row_lbls, col_lbls, values, title=None, out_of=None, bold_best=False,
        flip=False, bigger_is_better=True, multicol_lbls=None, FORCE_INT=False,
        precision=None, SHORTEN_ROW_LBLS=False, col_align='l', col_sep='|',
        multicol_sep='|', centerline=True, astable=False, table_position='',
        AUTOFIX_LATEX=True, **kwargs):
    r"""
    makes a LaTeX tabular for displaying scores or errors

    Args:
        row_lbls (list of str):
        col_lbls (list of str):
        values (ndarray):
        title (str): (default = None)
        out_of (None): (default = None)
        bold_best (bool): (default = True)
        flip (bool): (default = False)
        table_position (str) : eg '[h]'

    Returns:
        str: tabular_str

    CommandLine:
        python -m utool.util_latex --test-make_score_tabular:0 --show
        python -m utool.util_latex --test-make_score_tabular:1 --show
        python -m utool.util_latex --test-make_score_tabular:2 --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> import utool as ut
        >>> row_lbls = ['config1', 'config2']
        >>> col_lbls = ['score \leq 1', 'metric2']
        >>> values = np.array([[1.2, 2], [3.2, 4]])
        >>> title = 'title'
        >>> out_of = 10
        >>> bold_best = True
        >>> flip = False
        >>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip)
        >>> result = tabular_str
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> render_latex_text(tabular_str)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> import utool as ut
        >>> row_lbls = ['config1']
        >>> col_lbls = ['score \leq 1', 'metric2']
        >>> values = np.array([[1.2, 2]])
        >>> title = 'title'
        >>> out_of = 10
        >>> bold_best = True
        >>> flip = False
        >>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip)
        >>> result = tabular_str
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> render_latex_text(tabular_str)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> import utool as ut
        >>> row_lbls = ['config1', 'config2']
        >>> col_lbls = ['score \leq 1', 'metric2', 'foobar']
        >>> multicol_lbls = [('spam', 1), ('eggs', 2)]
        >>> values = np.array([[1.2, 2, -3], [3.2, 4, -2]])
        >>> title = 'title'
        >>> out_of = 10
        >>> bold_best = True
        >>> flip = False
        >>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip, multicol_lbls=multicol_lbls)
        >>> result = tabular_str
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> render_latex_text(tabular_str)
    """
    import utool as ut
    if flip:
        # Flipping turns "score" semantics into "error" semantics: invert
        # the best-direction, rewrite comparison symbols in labels, and
        # (when out_of is known) complement the values.
        bigger_is_better = not bigger_is_better
        flip_repltups = [
            ('<=', '>'),
            ('>', '<='),
            ('\\leq', '\\gt'),
            ('\\geq', '\\lt'),
            ('score', 'error')
        ]
        col_lbls = [replace_all(lbl, flip_repltups) for lbl in col_lbls]
        if title is not None:
            title = replace_all(title, flip_repltups)
        if out_of is not None:
            values = out_of - values
    # Abbreviate based on common substrings
    common_rowlbl = None
    if SHORTEN_ROW_LBLS:
        if isinstance(row_lbls, list):
            row_lbl_list = row_lbls
        else:
            row_lbl_list = row_lbls.flatten().tolist()
        # Split the rob labels into the alg components
        #algcomp_list = [lbl.split(')_') for lbl in row_lbl_list]
        # Repeatedly factor out the longest shared substring (>10 chars)
        # from every row label, replacing it with '...'.
        longest = long_substr(row_lbl_list)
        common_strs = []
        while len(longest) > 10:
            common_strs += [longest]
            row_lbl_list = [row.replace(longest, '...') for row in row_lbl_list]
            longest = long_substr(row_lbl_list)
        common_rowlbl = ('...'.join(common_strs)).replace(')_', ')_\n')
        row_lbls = row_lbl_list
        if len(row_lbl_list) == 1:
            common_rowlbl = row_lbl_list[0]
            row_lbls = ['0']
    # Stack values into a tabular body
    # TODO: need ability to specify datatypes
    def ensurelist(row_values):
        # Accept either an ndarray row (has .tolist) or a plain list.
        try:
            return row_values.tolist()
        except AttributeError:
            return row_values
    if False:
        # NOTE(review): dead branch (guarded by `if False`) — an older
        # numpy-based body builder kept for reference only.
        # Numpy formatting
        def padvec(shape=(1, 1)):
            pad = np.array([[' ' for c in range(shape[1])] for r in range(shape[0])])
            return pad
        col_lbls = ensure_rowvec(col_lbls)
        row_lbls = ensure_colvec(row_lbls)
        _0 = np.vstack([padvec(), row_lbls])
        _1 = np.vstack([col_lbls, values])
        body = np.hstack([_0, _1])
        body = [[str_ for str_ in row] for row in body]
    else:
        assert len(row_lbls) == len(values)
        # body[0] is the header row; body[r][0] is the row label; the
        # remaining cells are the data values.
        body = [[' '] + col_lbls]
        body += [[row_lbl] + ensurelist(row_values) for row_lbl, row_values in zip(row_lbls, values)]
    #import utool as ut
    # Fix things in each body cell
    DO_PERCENT = True
    try:
        for r in range(len(body)):
            for c in range(len(body[0])):
                # In data land (r > 0 and c > 0 skips the header row and
                # the row-label column)
                if r > 0 and c > 0:
                    if precision is not None:
                        # Hack
                        if ut.is_float(body[r][c]):
                            fmtstr = '%.' + str(precision) + 'f'
                            body[r][c] = fmtstr % (float(body[r][c]),)
                    # Force integer
                    if FORCE_INT:
                        body[r][c] = str(int(float(body[r][c])))
                body[r][c] = str(body[r][c])
                # Remove bad formatting;
                if AUTOFIX_LATEX:
                    body[r][c] = escape_latex(body[r][c])
    except Exception as ex:
        # Diagnostic dump on any formatting failure, then re-raise.
        import utool as ut
        print('len(row_lbls) = %r' % (len(row_lbls),))
        print('len(col_lbls) = %r' % (len(col_lbls),))
        print('len(values) = %r' % (values,))
        print('ut.depth_profile(values) = %r' % (ut.depth_profile(values),))
        ut.printex(ex, keys=['r', 'c'])
        raise
    # Bold the best values
    # NOTE(review): this path indexes `values` as a 2-D ndarray
    # (values.max(0), values[:, colx], values.T) — bold_best=True
    # presumably requires `values` to be an ndarray, not a list of lists.
    if bold_best:
        best_col_scores = values.max(0) if bigger_is_better else values.min(0)
        rows_to_bold = [np.where(values[:, colx] == best_col_scores[colx])[0]
                        for colx in range(len(values.T))]
        for colx, rowx_list in enumerate(rows_to_bold):
            for rowx in rowx_list:
                # +1 offsets skip the header row / row-label column.
                body[rowx + 1][colx + 1] = '\\txtbf{' + body[rowx + 1][colx + 1] + '}'
    # More fixing after the bold is in place
    for r in range(len(body)):
        for c in range(len(body[0])):
            # In data land
            if r > 0 and c > 0:
                if out_of is not None:
                    # Append "/out_of" and optionally " = XX.X%".
                    body[r][c] = body[r][c] + '/' + str(out_of)
                    if DO_PERCENT:
                        percent = ' = %.1f%%' % float(100 * values[r - 1, c - 1] / out_of)
                        body[r][c] += escape_latex(percent)
    # Align columns for pretty printing (right-pad every cell in a
    # column to that column's max width so the LaTeX source lines up)
    body = np.array(body)
    ALIGN_BODY = True
    if ALIGN_BODY:
        new_body_cols = []
        for col in body.T:
            colstrs = list(map(str, ensurelist(col)))
            collens = list(map(len, colstrs))
            maxlen = max(collens)
            newcols = [str_ + (' ' * (maxlen - len(str_))) for str_ in colstrs]
            new_body_cols += [newcols]
        body = np.array(new_body_cols).T
    # Build Body (and row layout)
    HLINE_SEP = True
    rowvalsep = ''
    colvalsep = ' & '
    endl = '\\\\\n'
    hline = r'\hline'
    #extra_rowsep_pos_list = [1]  # rows to insert an extra hline after
    extra_rowsep_pos_list = []  # rows to insert an extra hline after
    if HLINE_SEP:
        rowvalsep = hline + '\n'
    # rowstr list holds blocks of rows
    rowstr_list = [colvalsep.join(row) + endl for row in body]
    #rowstr_list = [row[0] + rowlbl_sep + colvalsep.join(row[1:]) + endl for row in body]
    #rowstr_list = [(
    #    ('' if len(row) == 0 else row[0])
    #    if len(row) <= 1 else
    #    row[0] + rowlblcol_sep + colvalsep.join(row[1:]) + endl)
    #    for row in body]
    rowsep_list = [rowvalsep for row in rowstr_list[0:-1]]  # should be len 1 less than rowstr_list
    # Insert multicolumn names
    if multicol_lbls is not None:
        # TODO: label of the row labels
        # NOTE(review): bare expression below is a no-op statement.
        multicol_sep
        multicols = [latex_multicolumn(multicol, size, 'c' + multicol_sep) for multicol, size in multicol_lbls]
        multicol_str = latex_multirow('', 2) + colvalsep + colvalsep.join(multicols) + endl
        ncols = sum([tup[1] for tup in multicol_lbls])
        mcol_sep = '\\cline{2-%d}\n' % (ncols + 1,)
        rowstr_list = [multicol_str] + rowstr_list
        rowsep_list = [mcol_sep] + rowsep_list
        #extra_rowsep_pos_list += [1]
    # Insert title (as a spanning multicolumn row; skipped when astable
    # because the table environment gets a \caption instead)
    if title is not None and not astable:
        tex_title = latex_multicolumn(title, len(body[0])) + endl
        rowstr_list = [tex_title] + rowstr_list
        rowsep_list = [rowvalsep] + rowsep_list
        #extra_rowsep_pos_list += [2]
    # Apply an extra hline (for label)
    #extra_rowsep_pos_list = []
    for pos in sorted(extra_rowsep_pos_list)[::-1]:
        rowstr_list.insert(pos, '')
        rowsep_list.insert(pos, rowvalsep)
    #tabular_body = rowvalsep.join(rowstr_list)
    # Interleave rows with their separators; zip_longest pads the final
    # row (which has no separator) with None.
    from six.moves import zip_longest
    tabular_body = ''.join([row if sep is None else row + sep for row, sep in zip_longest(rowstr_list, rowsep_list)])
    # Build Column Layout
    col_align_list = [col_align] * len(body[0])
    #extra_collayoutsep_pos_list = [1]
    extra_collayoutsep_pos_list = []
    for pos in sorted(extra_collayoutsep_pos_list)[::-1]:
        col_align_list.insert(pos, '')
    #col_layaout_sep_list = rowlblcol_sep  # TODO
    rowlblcol_sep = '|'
    # Build build internal seprations between column alignments
    # Defaults to just the normal col_sep
    col_align_sep_list = [col_sep] * (len(col_align_list) - 1)
    # Adjust for the separations between row labels and the actual row data
    if len(col_align_sep_list) > 0:
        col_align_sep_list[0] = rowlblcol_sep
    # Continue multicolumn sepratation
    if multicol_lbls is not None:
        multicol_offsets = ut.cumsum(ut.get_list_column(multicol_lbls, 1))
        for offset in multicol_offsets:
            if offset < len(col_align_sep_list):
                col_align_sep_list[offset] = multicol_sep
    # NOTE(review): duplicate import — zip_longest was already imported
    # above; harmless but redundant.
    from six.moves import zip_longest
    _tmp = [ut.filter_Nones(tup) for tup in zip_longest(col_align_list, col_align_sep_list)]
    col_layout = ''.join(ut.flatten(_tmp))
    #if len(col_align_list) > 1:
    #    col_layout = col_align_list[0] + rowlblcol_sep + col_sep.join(col_align_list[1:])
    #else:
    #    col_layout = col_sep.join(col_align_list)
    tabular_head = (r'\begin{tabular}{|%s|}' % col_layout) + '\n'
    tabular_tail = r'\end{tabular}'
    if centerline:
        tabular_head = r'\centerline{' + '\n' + tabular_head
        tabular_tail = tabular_tail + '}'
    if astable:
        # Wrap the tabular in a full table environment with caption/label.
        #tabular_head = r'\begin{centering}' + '\n' + tabular_head
        tabular_head = r'\centering' + '\n' + tabular_head
        tabular_head = r'\begin{table}' + table_position + '\n' + tabular_head
        lblstr = latex_sanitize_command_name(kwargs.get('label', title))
        caption = title
        if AUTOFIX_LATEX:
            caption = escape_latex(caption)
        caption = '\n% ---\n' + caption + '\n% ---\n'
        #tabular_head = r'\end{centering}' + '\n' + tabular_head
        tabular_tail = tabular_tail + '\n\caption[%s]{%s}\n\label{tbl:%s}\n\end{table}' % (lblstr, caption, lblstr)
    # NOTE(review): this first assignment is dead code — it is
    # immediately overwritten two lines below.
    tabular_str = rowvalsep.join([tabular_head, tabular_body, tabular_tail])
    topsep = '\\hline\n' if True else '\\toprule\n'
    botsep = '\\hline\n' if True else '\\bottomrule\n'
    tabular_str = tabular_head + topsep + tabular_body + botsep + tabular_tail
    if common_rowlbl is not None:
        #tabular_str += escape_latex('\n\nThe following parameters were held fixed:\n' + common_rowlbl)
        pass
    return tabular_str
constant[
makes a LaTeX tabular for displaying scores or errors
Args:
row_lbls (list of str):
col_lbls (list of str):
values (ndarray):
title (str): (default = None)
out_of (None): (default = None)
bold_best (bool): (default = True)
flip (bool): (default = False)
table_position (str) : eg '[h]'
Returns:
str: tabular_str
CommandLine:
python -m utool.util_latex --test-make_score_tabular:0 --show
python -m utool.util_latex --test-make_score_tabular:1 --show
python -m utool.util_latex --test-make_score_tabular:2 --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> row_lbls = ['config1', 'config2']
>>> col_lbls = ['score \leq 1', 'metric2']
>>> values = np.array([[1.2, 2], [3.2, 4]])
>>> title = 'title'
>>> out_of = 10
>>> bold_best = True
>>> flip = False
>>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip)
>>> result = tabular_str
>>> print(result)
>>> ut.quit_if_noshow()
>>> render_latex_text(tabular_str)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> row_lbls = ['config1']
>>> col_lbls = ['score \leq 1', 'metric2']
>>> values = np.array([[1.2, 2]])
>>> title = 'title'
>>> out_of = 10
>>> bold_best = True
>>> flip = False
>>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip)
>>> result = tabular_str
>>> print(result)
>>> ut.quit_if_noshow()
>>> render_latex_text(tabular_str)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> row_lbls = ['config1', 'config2']
>>> col_lbls = ['score \leq 1', 'metric2', 'foobar']
>>> multicol_lbls = [('spam', 1), ('eggs', 2)]
>>> values = np.array([[1.2, 2, -3], [3.2, 4, -2]])
>>> title = 'title'
>>> out_of = 10
>>> bold_best = True
>>> flip = False
>>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip, multicol_lbls=multicol_lbls)
>>> result = tabular_str
>>> print(result)
>>> ut.quit_if_noshow()
>>> render_latex_text(tabular_str)
]
import module[utool] as alias[ut]
if name[flip] begin[:]
variable[bigger_is_better] assign[=] <ast.UnaryOp object at 0x7da1b23d29b0>
variable[flip_repltups] assign[=] list[[<ast.Tuple object at 0x7da1b23d2890>, <ast.Tuple object at 0x7da1b23d2800>, <ast.Tuple object at 0x7da1b23d2770>, <ast.Tuple object at 0x7da1b23d26e0>, <ast.Tuple object at 0x7da1b23d2650>]]
variable[col_lbls] assign[=] <ast.ListComp object at 0x7da1b23d2560>
if compare[name[title] is_not constant[None]] begin[:]
variable[title] assign[=] call[name[replace_all], parameter[name[title], name[flip_repltups]]]
if compare[name[out_of] is_not constant[None]] begin[:]
variable[values] assign[=] binary_operation[name[out_of] - name[values]]
variable[common_rowlbl] assign[=] constant[None]
if name[SHORTEN_ROW_LBLS] begin[:]
if call[name[isinstance], parameter[name[row_lbls], name[list]]] begin[:]
variable[row_lbl_list] assign[=] name[row_lbls]
variable[longest] assign[=] call[name[long_substr], parameter[name[row_lbl_list]]]
variable[common_strs] assign[=] list[[]]
while compare[call[name[len], parameter[name[longest]]] greater[>] constant[10]] begin[:]
<ast.AugAssign object at 0x7da1b23d19c0>
variable[row_lbl_list] assign[=] <ast.ListComp object at 0x7da1b23d18a0>
variable[longest] assign[=] call[name[long_substr], parameter[name[row_lbl_list]]]
variable[common_rowlbl] assign[=] call[call[constant[...].join, parameter[name[common_strs]]].replace, parameter[constant[)_], constant[)_
]]]
variable[row_lbls] assign[=] name[row_lbl_list]
if compare[call[name[len], parameter[name[row_lbl_list]]] equal[==] constant[1]] begin[:]
variable[common_rowlbl] assign[=] call[name[row_lbl_list]][constant[0]]
variable[row_lbls] assign[=] list[[<ast.Constant object at 0x7da1b23d10c0>]]
def function[ensurelist, parameter[row_values]]:
<ast.Try object at 0x7da1b23d1000>
if constant[False] begin[:]
def function[padvec, parameter[shape]]:
variable[pad] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b23d0b80>]]
return[name[pad]]
variable[col_lbls] assign[=] call[name[ensure_rowvec], parameter[name[col_lbls]]]
variable[row_lbls] assign[=] call[name[ensure_colvec], parameter[name[row_lbls]]]
variable[_0] assign[=] call[name[np].vstack, parameter[list[[<ast.Call object at 0x7da1b23d0460>, <ast.Name object at 0x7da1b23d0400>]]]]
variable[_1] assign[=] call[name[np].vstack, parameter[list[[<ast.Name object at 0x7da1b23d02b0>, <ast.Name object at 0x7da1b23d0280>]]]]
variable[body] assign[=] call[name[np].hstack, parameter[list[[<ast.Name object at 0x7da1b23d0130>, <ast.Name object at 0x7da1b23d0100>]]]]
variable[body] assign[=] <ast.ListComp object at 0x7da1b23d0070>
variable[DO_PERCENT] assign[=] constant[True]
<ast.Try object at 0x7da1b23c3760>
if name[bold_best] begin[:]
variable[best_col_scores] assign[=] <ast.IfExp object at 0x7da1b235a950>
variable[rows_to_bold] assign[=] <ast.ListComp object at 0x7da1b2359d80>
for taget[tuple[[<ast.Name object at 0x7da1b235b0d0>, <ast.Name object at 0x7da1b235ad40>]]] in starred[call[name[enumerate], parameter[name[rows_to_bold]]]] begin[:]
for taget[name[rowx]] in starred[name[rowx_list]] begin[:]
call[call[name[body]][binary_operation[name[rowx] + constant[1]]]][binary_operation[name[colx] + constant[1]]] assign[=] binary_operation[binary_operation[constant[\txtbf{] + call[call[name[body]][binary_operation[name[rowx] + constant[1]]]][binary_operation[name[colx] + constant[1]]]] + constant[}]]
for taget[name[r]] in starred[call[name[range], parameter[call[name[len], parameter[name[body]]]]]] begin[:]
for taget[name[c]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[body]][constant[0]]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b2359780> begin[:]
if compare[name[out_of] is_not constant[None]] begin[:]
call[call[name[body]][name[r]]][name[c]] assign[=] binary_operation[binary_operation[call[call[name[body]][name[r]]][name[c]] + constant[/]] + call[name[str], parameter[name[out_of]]]]
if name[DO_PERCENT] begin[:]
variable[percent] assign[=] binary_operation[constant[ = %.1f%%] <ast.Mod object at 0x7da2590d6920> call[name[float], parameter[binary_operation[binary_operation[constant[100] * call[name[values]][tuple[[<ast.BinOp object at 0x7da1b2359870>, <ast.BinOp object at 0x7da1b235b7f0>]]]] / name[out_of]]]]]
<ast.AugAssign object at 0x7da1b235be50>
variable[body] assign[=] call[name[np].array, parameter[name[body]]]
variable[ALIGN_BODY] assign[=] constant[True]
if name[ALIGN_BODY] begin[:]
variable[new_body_cols] assign[=] list[[]]
for taget[name[col]] in starred[name[body].T] begin[:]
variable[colstrs] assign[=] call[name[list], parameter[call[name[map], parameter[name[str], call[name[ensurelist], parameter[name[col]]]]]]]
variable[collens] assign[=] call[name[list], parameter[call[name[map], parameter[name[len], name[colstrs]]]]]
variable[maxlen] assign[=] call[name[max], parameter[name[collens]]]
variable[newcols] assign[=] <ast.ListComp object at 0x7da1b24b7100>
<ast.AugAssign object at 0x7da1b24b6860>
variable[body] assign[=] call[name[np].array, parameter[name[new_body_cols]]].T
variable[HLINE_SEP] assign[=] constant[True]
variable[rowvalsep] assign[=] constant[]
variable[colvalsep] assign[=] constant[ & ]
variable[endl] assign[=] constant[\\
]
variable[hline] assign[=] constant[\hline]
variable[extra_rowsep_pos_list] assign[=] list[[]]
if name[HLINE_SEP] begin[:]
variable[rowvalsep] assign[=] binary_operation[name[hline] + constant[
]]
variable[rowstr_list] assign[=] <ast.ListComp object at 0x7da1b24b5030>
variable[rowsep_list] assign[=] <ast.ListComp object at 0x7da1b24b74f0>
if compare[name[multicol_lbls] is_not constant[None]] begin[:]
name[multicol_sep]
variable[multicols] assign[=] <ast.ListComp object at 0x7da1b24b6740>
variable[multicol_str] assign[=] binary_operation[binary_operation[binary_operation[call[name[latex_multirow], parameter[constant[], constant[2]]] + name[colvalsep]] + call[name[colvalsep].join, parameter[name[multicols]]]] + name[endl]]
variable[ncols] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b24b5ab0>]]
variable[mcol_sep] assign[=] binary_operation[constant[\cline{2-%d}
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b24b5fc0>]]]
variable[rowstr_list] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b24b7070>]] + name[rowstr_list]]
variable[rowsep_list] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b24b64a0>]] + name[rowsep_list]]
if <ast.BoolOp object at 0x7da1b24b5a80> begin[:]
variable[tex_title] assign[=] binary_operation[call[name[latex_multicolumn], parameter[name[title], call[name[len], parameter[call[name[body]][constant[0]]]]]] + name[endl]]
variable[rowstr_list] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b24b6c80>]] + name[rowstr_list]]
variable[rowsep_list] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b24b4700>]] + name[rowsep_list]]
for taget[name[pos]] in starred[call[call[name[sorted], parameter[name[extra_rowsep_pos_list]]]][<ast.Slice object at 0x7da1b24aead0>]] begin[:]
call[name[rowstr_list].insert, parameter[name[pos], constant[]]]
call[name[rowsep_list].insert, parameter[name[pos], name[rowvalsep]]]
from relative_module[six.moves] import module[zip_longest]
variable[tabular_body] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da1b24ac490>]]
variable[col_align_list] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b24afa00>]] * call[name[len], parameter[call[name[body]][constant[0]]]]]
variable[extra_collayoutsep_pos_list] assign[=] list[[]]
for taget[name[pos]] in starred[call[call[name[sorted], parameter[name[extra_collayoutsep_pos_list]]]][<ast.Slice object at 0x7da1b24aca00>]] begin[:]
call[name[col_align_list].insert, parameter[name[pos], constant[]]]
variable[rowlblcol_sep] assign[=] constant[|]
variable[col_align_sep_list] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b24af5b0>]] * binary_operation[call[name[len], parameter[name[col_align_list]]] - constant[1]]]
if compare[call[name[len], parameter[name[col_align_sep_list]]] greater[>] constant[0]] begin[:]
call[name[col_align_sep_list]][constant[0]] assign[=] name[rowlblcol_sep]
if compare[name[multicol_lbls] is_not constant[None]] begin[:]
variable[multicol_offsets] assign[=] call[name[ut].cumsum, parameter[call[name[ut].get_list_column, parameter[name[multicol_lbls], constant[1]]]]]
for taget[name[offset]] in starred[name[multicol_offsets]] begin[:]
if compare[name[offset] less[<] call[name[len], parameter[name[col_align_sep_list]]]] begin[:]
call[name[col_align_sep_list]][name[offset]] assign[=] name[multicol_sep]
from relative_module[six.moves] import module[zip_longest]
variable[_tmp] assign[=] <ast.ListComp object at 0x7da1b2424100>
variable[col_layout] assign[=] call[constant[].join, parameter[call[name[ut].flatten, parameter[name[_tmp]]]]]
variable[tabular_head] assign[=] binary_operation[binary_operation[constant[\begin{tabular}{|%s|}] <ast.Mod object at 0x7da2590d6920> name[col_layout]] + constant[
]]
variable[tabular_tail] assign[=] constant[\end{tabular}]
if name[centerline] begin[:]
variable[tabular_head] assign[=] binary_operation[binary_operation[constant[\centerline{] + constant[
]] + name[tabular_head]]
variable[tabular_tail] assign[=] binary_operation[name[tabular_tail] + constant[}]]
if name[astable] begin[:]
variable[tabular_head] assign[=] binary_operation[binary_operation[constant[\centering] + constant[
]] + name[tabular_head]]
variable[tabular_head] assign[=] binary_operation[binary_operation[binary_operation[constant[\begin{table}] + name[table_position]] + constant[
]] + name[tabular_head]]
variable[lblstr] assign[=] call[name[latex_sanitize_command_name], parameter[call[name[kwargs].get, parameter[constant[label], name[title]]]]]
variable[caption] assign[=] name[title]
if name[AUTOFIX_LATEX] begin[:]
variable[caption] assign[=] call[name[escape_latex], parameter[name[caption]]]
variable[caption] assign[=] binary_operation[binary_operation[constant[
% ---
] + name[caption]] + constant[
% ---
]]
variable[tabular_tail] assign[=] binary_operation[name[tabular_tail] + binary_operation[constant[
\caption[%s]{%s}
\label{tbl:%s}
\end{table}] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b253b0d0>, <ast.Name object at 0x7da1b253ba60>, <ast.Name object at 0x7da1b25382b0>]]]]
variable[tabular_str] assign[=] call[name[rowvalsep].join, parameter[list[[<ast.Name object at 0x7da1b2538eb0>, <ast.Name object at 0x7da1b253a8f0>, <ast.Name object at 0x7da1b253bf10>]]]]
variable[topsep] assign[=] <ast.IfExp object at 0x7da1b253b790>
variable[botsep] assign[=] <ast.IfExp object at 0x7da1b253b8b0>
variable[tabular_str] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[tabular_head] + name[topsep]] + name[tabular_body]] + name[botsep]] + name[tabular_tail]]
if compare[name[common_rowlbl] is_not constant[None]] begin[:]
pass
return[name[tabular_str]] | keyword[def] identifier[make_score_tabular] (
identifier[row_lbls] , identifier[col_lbls] , identifier[values] , identifier[title] = keyword[None] , identifier[out_of] = keyword[None] , identifier[bold_best] = keyword[False] ,
identifier[flip] = keyword[False] , identifier[bigger_is_better] = keyword[True] , identifier[multicol_lbls] = keyword[None] , identifier[FORCE_INT] = keyword[False] ,
identifier[precision] = keyword[None] , identifier[SHORTEN_ROW_LBLS] = keyword[False] , identifier[col_align] = literal[string] , identifier[col_sep] = literal[string] ,
identifier[multicol_sep] = literal[string] , identifier[centerline] = keyword[True] , identifier[astable] = keyword[False] , identifier[table_position] = literal[string] ,
identifier[AUTOFIX_LATEX] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[utool] keyword[as] identifier[ut]
keyword[if] identifier[flip] :
identifier[bigger_is_better] = keyword[not] identifier[bigger_is_better]
identifier[flip_repltups] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] )
]
identifier[col_lbls] =[ identifier[replace_all] ( identifier[lbl] , identifier[flip_repltups] ) keyword[for] identifier[lbl] keyword[in] identifier[col_lbls] ]
keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] :
identifier[title] = identifier[replace_all] ( identifier[title] , identifier[flip_repltups] )
keyword[if] identifier[out_of] keyword[is] keyword[not] keyword[None] :
identifier[values] = identifier[out_of] - identifier[values]
identifier[common_rowlbl] = keyword[None]
keyword[if] identifier[SHORTEN_ROW_LBLS] :
keyword[if] identifier[isinstance] ( identifier[row_lbls] , identifier[list] ):
identifier[row_lbl_list] = identifier[row_lbls]
keyword[else] :
identifier[row_lbl_list] = identifier[row_lbls] . identifier[flatten] (). identifier[tolist] ()
identifier[longest] = identifier[long_substr] ( identifier[row_lbl_list] )
identifier[common_strs] =[]
keyword[while] identifier[len] ( identifier[longest] )> literal[int] :
identifier[common_strs] +=[ identifier[longest] ]
identifier[row_lbl_list] =[ identifier[row] . identifier[replace] ( identifier[longest] , literal[string] ) keyword[for] identifier[row] keyword[in] identifier[row_lbl_list] ]
identifier[longest] = identifier[long_substr] ( identifier[row_lbl_list] )
identifier[common_rowlbl] =( literal[string] . identifier[join] ( identifier[common_strs] )). identifier[replace] ( literal[string] , literal[string] )
identifier[row_lbls] = identifier[row_lbl_list]
keyword[if] identifier[len] ( identifier[row_lbl_list] )== literal[int] :
identifier[common_rowlbl] = identifier[row_lbl_list] [ literal[int] ]
identifier[row_lbls] =[ literal[string] ]
keyword[def] identifier[ensurelist] ( identifier[row_values] ):
keyword[try] :
keyword[return] identifier[row_values] . identifier[tolist] ()
keyword[except] identifier[AttributeError] :
keyword[return] identifier[row_values]
keyword[if] keyword[False] :
keyword[def] identifier[padvec] ( identifier[shape] =( literal[int] , literal[int] )):
identifier[pad] = identifier[np] . identifier[array] ([[ literal[string] keyword[for] identifier[c] keyword[in] identifier[range] ( identifier[shape] [ literal[int] ])] keyword[for] identifier[r] keyword[in] identifier[range] ( identifier[shape] [ literal[int] ])])
keyword[return] identifier[pad]
identifier[col_lbls] = identifier[ensure_rowvec] ( identifier[col_lbls] )
identifier[row_lbls] = identifier[ensure_colvec] ( identifier[row_lbls] )
identifier[_0] = identifier[np] . identifier[vstack] ([ identifier[padvec] (), identifier[row_lbls] ])
identifier[_1] = identifier[np] . identifier[vstack] ([ identifier[col_lbls] , identifier[values] ])
identifier[body] = identifier[np] . identifier[hstack] ([ identifier[_0] , identifier[_1] ])
identifier[body] =[[ identifier[str_] keyword[for] identifier[str_] keyword[in] identifier[row] ] keyword[for] identifier[row] keyword[in] identifier[body] ]
keyword[else] :
keyword[assert] identifier[len] ( identifier[row_lbls] )== identifier[len] ( identifier[values] )
identifier[body] =[[ literal[string] ]+ identifier[col_lbls] ]
identifier[body] +=[[ identifier[row_lbl] ]+ identifier[ensurelist] ( identifier[row_values] ) keyword[for] identifier[row_lbl] , identifier[row_values] keyword[in] identifier[zip] ( identifier[row_lbls] , identifier[values] )]
identifier[DO_PERCENT] = keyword[True]
keyword[try] :
keyword[for] identifier[r] keyword[in] identifier[range] ( identifier[len] ( identifier[body] )):
keyword[for] identifier[c] keyword[in] identifier[range] ( identifier[len] ( identifier[body] [ literal[int] ])):
keyword[if] identifier[r] > literal[int] keyword[and] identifier[c] > literal[int] :
keyword[if] identifier[precision] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[ut] . identifier[is_float] ( identifier[body] [ identifier[r] ][ identifier[c] ]):
identifier[fmtstr] = literal[string] + identifier[str] ( identifier[precision] )+ literal[string]
identifier[body] [ identifier[r] ][ identifier[c] ]= identifier[fmtstr] %( identifier[float] ( identifier[body] [ identifier[r] ][ identifier[c] ]),)
keyword[if] identifier[FORCE_INT] :
identifier[body] [ identifier[r] ][ identifier[c] ]= identifier[str] ( identifier[int] ( identifier[float] ( identifier[body] [ identifier[r] ][ identifier[c] ])))
identifier[body] [ identifier[r] ][ identifier[c] ]= identifier[str] ( identifier[body] [ identifier[r] ][ identifier[c] ])
keyword[if] identifier[AUTOFIX_LATEX] :
identifier[body] [ identifier[r] ][ identifier[c] ]= identifier[escape_latex] ( identifier[body] [ identifier[r] ][ identifier[c] ])
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[import] identifier[utool] keyword[as] identifier[ut]
identifier[print] ( literal[string] %( identifier[len] ( identifier[row_lbls] ),))
identifier[print] ( literal[string] %( identifier[len] ( identifier[col_lbls] ),))
identifier[print] ( literal[string] %( identifier[values] ,))
identifier[print] ( literal[string] %( identifier[ut] . identifier[depth_profile] ( identifier[values] ),))
identifier[ut] . identifier[printex] ( identifier[ex] , identifier[keys] =[ literal[string] , literal[string] ])
keyword[raise]
keyword[if] identifier[bold_best] :
identifier[best_col_scores] = identifier[values] . identifier[max] ( literal[int] ) keyword[if] identifier[bigger_is_better] keyword[else] identifier[values] . identifier[min] ( literal[int] )
identifier[rows_to_bold] =[ identifier[np] . identifier[where] ( identifier[values] [:, identifier[colx] ]== identifier[best_col_scores] [ identifier[colx] ])[ literal[int] ]
keyword[for] identifier[colx] keyword[in] identifier[range] ( identifier[len] ( identifier[values] . identifier[T] ))]
keyword[for] identifier[colx] , identifier[rowx_list] keyword[in] identifier[enumerate] ( identifier[rows_to_bold] ):
keyword[for] identifier[rowx] keyword[in] identifier[rowx_list] :
identifier[body] [ identifier[rowx] + literal[int] ][ identifier[colx] + literal[int] ]= literal[string] + identifier[body] [ identifier[rowx] + literal[int] ][ identifier[colx] + literal[int] ]+ literal[string]
keyword[for] identifier[r] keyword[in] identifier[range] ( identifier[len] ( identifier[body] )):
keyword[for] identifier[c] keyword[in] identifier[range] ( identifier[len] ( identifier[body] [ literal[int] ])):
keyword[if] identifier[r] > literal[int] keyword[and] identifier[c] > literal[int] :
keyword[if] identifier[out_of] keyword[is] keyword[not] keyword[None] :
identifier[body] [ identifier[r] ][ identifier[c] ]= identifier[body] [ identifier[r] ][ identifier[c] ]+ literal[string] + identifier[str] ( identifier[out_of] )
keyword[if] identifier[DO_PERCENT] :
identifier[percent] = literal[string] % identifier[float] ( literal[int] * identifier[values] [ identifier[r] - literal[int] , identifier[c] - literal[int] ]/ identifier[out_of] )
identifier[body] [ identifier[r] ][ identifier[c] ]+= identifier[escape_latex] ( identifier[percent] )
identifier[body] = identifier[np] . identifier[array] ( identifier[body] )
identifier[ALIGN_BODY] = keyword[True]
keyword[if] identifier[ALIGN_BODY] :
identifier[new_body_cols] =[]
keyword[for] identifier[col] keyword[in] identifier[body] . identifier[T] :
identifier[colstrs] = identifier[list] ( identifier[map] ( identifier[str] , identifier[ensurelist] ( identifier[col] )))
identifier[collens] = identifier[list] ( identifier[map] ( identifier[len] , identifier[colstrs] ))
identifier[maxlen] = identifier[max] ( identifier[collens] )
identifier[newcols] =[ identifier[str_] +( literal[string] *( identifier[maxlen] - identifier[len] ( identifier[str_] ))) keyword[for] identifier[str_] keyword[in] identifier[colstrs] ]
identifier[new_body_cols] +=[ identifier[newcols] ]
identifier[body] = identifier[np] . identifier[array] ( identifier[new_body_cols] ). identifier[T]
identifier[HLINE_SEP] = keyword[True]
identifier[rowvalsep] = literal[string]
identifier[colvalsep] = literal[string]
identifier[endl] = literal[string]
identifier[hline] = literal[string]
identifier[extra_rowsep_pos_list] =[]
keyword[if] identifier[HLINE_SEP] :
identifier[rowvalsep] = identifier[hline] + literal[string]
identifier[rowstr_list] =[ identifier[colvalsep] . identifier[join] ( identifier[row] )+ identifier[endl] keyword[for] identifier[row] keyword[in] identifier[body] ]
identifier[rowsep_list] =[ identifier[rowvalsep] keyword[for] identifier[row] keyword[in] identifier[rowstr_list] [ literal[int] :- literal[int] ]]
keyword[if] identifier[multicol_lbls] keyword[is] keyword[not] keyword[None] :
identifier[multicol_sep]
identifier[multicols] =[ identifier[latex_multicolumn] ( identifier[multicol] , identifier[size] , literal[string] + identifier[multicol_sep] ) keyword[for] identifier[multicol] , identifier[size] keyword[in] identifier[multicol_lbls] ]
identifier[multicol_str] = identifier[latex_multirow] ( literal[string] , literal[int] )+ identifier[colvalsep] + identifier[colvalsep] . identifier[join] ( identifier[multicols] )+ identifier[endl]
identifier[ncols] = identifier[sum] ([ identifier[tup] [ literal[int] ] keyword[for] identifier[tup] keyword[in] identifier[multicol_lbls] ])
identifier[mcol_sep] = literal[string] %( identifier[ncols] + literal[int] ,)
identifier[rowstr_list] =[ identifier[multicol_str] ]+ identifier[rowstr_list]
identifier[rowsep_list] =[ identifier[mcol_sep] ]+ identifier[rowsep_list]
keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[astable] :
identifier[tex_title] = identifier[latex_multicolumn] ( identifier[title] , identifier[len] ( identifier[body] [ literal[int] ]))+ identifier[endl]
identifier[rowstr_list] =[ identifier[tex_title] ]+ identifier[rowstr_list]
identifier[rowsep_list] =[ identifier[rowvalsep] ]+ identifier[rowsep_list]
keyword[for] identifier[pos] keyword[in] identifier[sorted] ( identifier[extra_rowsep_pos_list] )[::- literal[int] ]:
identifier[rowstr_list] . identifier[insert] ( identifier[pos] , literal[string] )
identifier[rowsep_list] . identifier[insert] ( identifier[pos] , identifier[rowvalsep] )
keyword[from] identifier[six] . identifier[moves] keyword[import] identifier[zip_longest]
identifier[tabular_body] = literal[string] . identifier[join] ([ identifier[row] keyword[if] identifier[sep] keyword[is] keyword[None] keyword[else] identifier[row] + identifier[sep] keyword[for] identifier[row] , identifier[sep] keyword[in] identifier[zip_longest] ( identifier[rowstr_list] , identifier[rowsep_list] )])
identifier[col_align_list] =[ identifier[col_align] ]* identifier[len] ( identifier[body] [ literal[int] ])
identifier[extra_collayoutsep_pos_list] =[]
keyword[for] identifier[pos] keyword[in] identifier[sorted] ( identifier[extra_collayoutsep_pos_list] )[::- literal[int] ]:
identifier[col_align_list] . identifier[insert] ( identifier[pos] , literal[string] )
identifier[rowlblcol_sep] = literal[string]
identifier[col_align_sep_list] =[ identifier[col_sep] ]*( identifier[len] ( identifier[col_align_list] )- literal[int] )
keyword[if] identifier[len] ( identifier[col_align_sep_list] )> literal[int] :
identifier[col_align_sep_list] [ literal[int] ]= identifier[rowlblcol_sep]
keyword[if] identifier[multicol_lbls] keyword[is] keyword[not] keyword[None] :
identifier[multicol_offsets] = identifier[ut] . identifier[cumsum] ( identifier[ut] . identifier[get_list_column] ( identifier[multicol_lbls] , literal[int] ))
keyword[for] identifier[offset] keyword[in] identifier[multicol_offsets] :
keyword[if] identifier[offset] < identifier[len] ( identifier[col_align_sep_list] ):
identifier[col_align_sep_list] [ identifier[offset] ]= identifier[multicol_sep]
keyword[from] identifier[six] . identifier[moves] keyword[import] identifier[zip_longest]
identifier[_tmp] =[ identifier[ut] . identifier[filter_Nones] ( identifier[tup] ) keyword[for] identifier[tup] keyword[in] identifier[zip_longest] ( identifier[col_align_list] , identifier[col_align_sep_list] )]
identifier[col_layout] = literal[string] . identifier[join] ( identifier[ut] . identifier[flatten] ( identifier[_tmp] ))
identifier[tabular_head] =( literal[string] % identifier[col_layout] )+ literal[string]
identifier[tabular_tail] = literal[string]
keyword[if] identifier[centerline] :
identifier[tabular_head] = literal[string] + literal[string] + identifier[tabular_head]
identifier[tabular_tail] = identifier[tabular_tail] + literal[string]
keyword[if] identifier[astable] :
identifier[tabular_head] = literal[string] + literal[string] + identifier[tabular_head]
identifier[tabular_head] = literal[string] + identifier[table_position] + literal[string] + identifier[tabular_head]
identifier[lblstr] = identifier[latex_sanitize_command_name] ( identifier[kwargs] . identifier[get] ( literal[string] , identifier[title] ))
identifier[caption] = identifier[title]
keyword[if] identifier[AUTOFIX_LATEX] :
identifier[caption] = identifier[escape_latex] ( identifier[caption] )
identifier[caption] = literal[string] + identifier[caption] + literal[string]
identifier[tabular_tail] = identifier[tabular_tail] + literal[string] %( identifier[lblstr] , identifier[caption] , identifier[lblstr] )
identifier[tabular_str] = identifier[rowvalsep] . identifier[join] ([ identifier[tabular_head] , identifier[tabular_body] , identifier[tabular_tail] ])
identifier[topsep] = literal[string] keyword[if] keyword[True] keyword[else] literal[string]
identifier[botsep] = literal[string] keyword[if] keyword[True] keyword[else] literal[string]
identifier[tabular_str] = identifier[tabular_head] + identifier[topsep] + identifier[tabular_body] + identifier[botsep] + identifier[tabular_tail]
keyword[if] identifier[common_rowlbl] keyword[is] keyword[not] keyword[None] :
keyword[pass]
keyword[return] identifier[tabular_str] | def make_score_tabular(row_lbls, col_lbls, values, title=None, out_of=None, bold_best=False, flip=False, bigger_is_better=True, multicol_lbls=None, FORCE_INT=False, precision=None, SHORTEN_ROW_LBLS=False, col_align='l', col_sep='|', multicol_sep='|', centerline=True, astable=False, table_position='', AUTOFIX_LATEX=True, **kwargs):
"""
makes a LaTeX tabular for displaying scores or errors
Args:
row_lbls (list of str):
col_lbls (list of str):
values (ndarray):
title (str): (default = None)
out_of (None): (default = None)
bold_best (bool): (default = True)
flip (bool): (default = False)
table_position (str) : eg '[h]'
Returns:
str: tabular_str
CommandLine:
python -m utool.util_latex --test-make_score_tabular:0 --show
python -m utool.util_latex --test-make_score_tabular:1 --show
python -m utool.util_latex --test-make_score_tabular:2 --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> row_lbls = ['config1', 'config2']
>>> col_lbls = ['score \\leq 1', 'metric2']
>>> values = np.array([[1.2, 2], [3.2, 4]])
>>> title = 'title'
>>> out_of = 10
>>> bold_best = True
>>> flip = False
>>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip)
>>> result = tabular_str
>>> print(result)
>>> ut.quit_if_noshow()
>>> render_latex_text(tabular_str)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> row_lbls = ['config1']
>>> col_lbls = ['score \\leq 1', 'metric2']
>>> values = np.array([[1.2, 2]])
>>> title = 'title'
>>> out_of = 10
>>> bold_best = True
>>> flip = False
>>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip)
>>> result = tabular_str
>>> print(result)
>>> ut.quit_if_noshow()
>>> render_latex_text(tabular_str)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> row_lbls = ['config1', 'config2']
>>> col_lbls = ['score \\leq 1', 'metric2', 'foobar']
>>> multicol_lbls = [('spam', 1), ('eggs', 2)]
>>> values = np.array([[1.2, 2, -3], [3.2, 4, -2]])
>>> title = 'title'
>>> out_of = 10
>>> bold_best = True
>>> flip = False
>>> tabular_str = make_score_tabular(row_lbls, col_lbls, values, title, out_of, bold_best, flip, multicol_lbls=multicol_lbls)
>>> result = tabular_str
>>> print(result)
>>> ut.quit_if_noshow()
>>> render_latex_text(tabular_str)
"""
import utool as ut
if flip:
bigger_is_better = not bigger_is_better
flip_repltups = [('<=', '>'), ('>', '<='), ('\\leq', '\\gt'), ('\\geq', '\\lt'), ('score', 'error')]
col_lbls = [replace_all(lbl, flip_repltups) for lbl in col_lbls]
if title is not None:
title = replace_all(title, flip_repltups) # depends on [control=['if'], data=['title']]
if out_of is not None:
values = out_of - values # depends on [control=['if'], data=['out_of']] # depends on [control=['if'], data=[]]
# Abbreviate based on common substrings
common_rowlbl = None
if SHORTEN_ROW_LBLS:
if isinstance(row_lbls, list):
row_lbl_list = row_lbls # depends on [control=['if'], data=[]]
else:
row_lbl_list = row_lbls.flatten().tolist()
# Split the rob labels into the alg components
#algcomp_list = [lbl.split(')_') for lbl in row_lbl_list]
longest = long_substr(row_lbl_list)
common_strs = []
while len(longest) > 10:
common_strs += [longest]
row_lbl_list = [row.replace(longest, '...') for row in row_lbl_list]
longest = long_substr(row_lbl_list) # depends on [control=['while'], data=[]]
common_rowlbl = '...'.join(common_strs).replace(')_', ')_\n')
row_lbls = row_lbl_list
if len(row_lbl_list) == 1:
common_rowlbl = row_lbl_list[0]
row_lbls = ['0'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Stack values into a tabular body
# TODO: need ability to specify datatypes
def ensurelist(row_values):
try:
return row_values.tolist() # depends on [control=['try'], data=[]]
except AttributeError:
return row_values # depends on [control=['except'], data=[]]
if False:
# Numpy formatting
def padvec(shape=(1, 1)):
pad = np.array([[' ' for c in range(shape[1])] for r in range(shape[0])])
return pad
col_lbls = ensure_rowvec(col_lbls)
row_lbls = ensure_colvec(row_lbls)
_0 = np.vstack([padvec(), row_lbls])
_1 = np.vstack([col_lbls, values])
body = np.hstack([_0, _1])
body = [[str_ for str_ in row] for row in body] # depends on [control=['if'], data=[]]
else:
assert len(row_lbls) == len(values)
body = [[' '] + col_lbls]
body += [[row_lbl] + ensurelist(row_values) for (row_lbl, row_values) in zip(row_lbls, values)]
#import utool as ut
# Fix things in each body cell
DO_PERCENT = True
try:
for r in range(len(body)):
for c in range(len(body[0])):
# In data land
if r > 0 and c > 0:
if precision is not None:
# Hack
if ut.is_float(body[r][c]):
fmtstr = '%.' + str(precision) + 'f'
body[r][c] = fmtstr % (float(body[r][c]),) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['precision']]
# Force integer
if FORCE_INT:
body[r][c] = str(int(float(body[r][c]))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
body[r][c] = str(body[r][c])
# Remove bad formatting;
if AUTOFIX_LATEX:
body[r][c] = escape_latex(body[r][c]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']] # depends on [control=['for'], data=['r']] # depends on [control=['try'], data=[]]
except Exception as ex:
import utool as ut
print('len(row_lbls) = %r' % (len(row_lbls),))
print('len(col_lbls) = %r' % (len(col_lbls),))
print('len(values) = %r' % (values,))
print('ut.depth_profile(values) = %r' % (ut.depth_profile(values),))
ut.printex(ex, keys=['r', 'c'])
raise # depends on [control=['except'], data=['ex']]
# Bold the best values
if bold_best:
best_col_scores = values.max(0) if bigger_is_better else values.min(0)
rows_to_bold = [np.where(values[:, colx] == best_col_scores[colx])[0] for colx in range(len(values.T))]
for (colx, rowx_list) in enumerate(rows_to_bold):
for rowx in rowx_list:
body[rowx + 1][colx + 1] = '\\txtbf{' + body[rowx + 1][colx + 1] + '}' # depends on [control=['for'], data=['rowx']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# More fixing after the bold is in place
for r in range(len(body)):
for c in range(len(body[0])):
# In data land
if r > 0 and c > 0:
if out_of is not None:
body[r][c] = body[r][c] + '/' + str(out_of)
if DO_PERCENT:
percent = ' = %.1f%%' % float(100 * values[r - 1, c - 1] / out_of)
body[r][c] += escape_latex(percent) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['out_of']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']] # depends on [control=['for'], data=['r']]
# Align columns for pretty printing
body = np.array(body)
ALIGN_BODY = True
if ALIGN_BODY:
new_body_cols = []
for col in body.T:
colstrs = list(map(str, ensurelist(col)))
collens = list(map(len, colstrs))
maxlen = max(collens)
newcols = [str_ + ' ' * (maxlen - len(str_)) for str_ in colstrs]
new_body_cols += [newcols] # depends on [control=['for'], data=['col']]
body = np.array(new_body_cols).T # depends on [control=['if'], data=[]]
# Build Body (and row layout)
HLINE_SEP = True
rowvalsep = ''
colvalsep = ' & '
endl = '\\\\\n'
hline = '\\hline'
#extra_rowsep_pos_list = [1] # rows to insert an extra hline after
extra_rowsep_pos_list = [] # rows to insert an extra hline after
if HLINE_SEP:
rowvalsep = hline + '\n' # depends on [control=['if'], data=[]]
# rowstr list holds blocks of rows
rowstr_list = [colvalsep.join(row) + endl for row in body]
#rowstr_list = [row[0] + rowlbl_sep + colvalsep.join(row[1:]) + endl for row in body]
#rowstr_list = [(
# ('' if len(row) == 0 else row[0])
# if len(row) <= 1 else
# row[0] + rowlblcol_sep + colvalsep.join(row[1:]) + endl)
# for row in body]
rowsep_list = [rowvalsep for row in rowstr_list[0:-1]] # should be len 1 less than rowstr_list
# Insert multicolumn names
if multicol_lbls is not None:
# TODO: label of the row labels
multicol_sep
multicols = [latex_multicolumn(multicol, size, 'c' + multicol_sep) for (multicol, size) in multicol_lbls]
multicol_str = latex_multirow('', 2) + colvalsep + colvalsep.join(multicols) + endl
ncols = sum([tup[1] for tup in multicol_lbls])
mcol_sep = '\\cline{2-%d}\n' % (ncols + 1,)
rowstr_list = [multicol_str] + rowstr_list
rowsep_list = [mcol_sep] + rowsep_list # depends on [control=['if'], data=['multicol_lbls']]
#extra_rowsep_pos_list += [1]
# Insert title
if title is not None and (not astable):
tex_title = latex_multicolumn(title, len(body[0])) + endl
rowstr_list = [tex_title] + rowstr_list
rowsep_list = [rowvalsep] + rowsep_list # depends on [control=['if'], data=[]]
#extra_rowsep_pos_list += [2]
# Apply an extra hline (for label)
#extra_rowsep_pos_list = []
for pos in sorted(extra_rowsep_pos_list)[::-1]:
rowstr_list.insert(pos, '')
rowsep_list.insert(pos, rowvalsep) # depends on [control=['for'], data=['pos']]
#tabular_body = rowvalsep.join(rowstr_list)
from six.moves import zip_longest
tabular_body = ''.join([row if sep is None else row + sep for (row, sep) in zip_longest(rowstr_list, rowsep_list)])
# Build Column Layout
col_align_list = [col_align] * len(body[0])
#extra_collayoutsep_pos_list = [1]
extra_collayoutsep_pos_list = []
for pos in sorted(extra_collayoutsep_pos_list)[::-1]:
col_align_list.insert(pos, '') # depends on [control=['for'], data=['pos']]
#col_layaout_sep_list = rowlblcol_sep # TODO
rowlblcol_sep = '|'
# Build build internal seprations between column alignments
# Defaults to just the normal col_sep
col_align_sep_list = [col_sep] * (len(col_align_list) - 1)
# Adjust for the separations between row labels and the actual row data
if len(col_align_sep_list) > 0:
col_align_sep_list[0] = rowlblcol_sep # depends on [control=['if'], data=[]]
# Continue multicolumn sepratation
if multicol_lbls is not None:
multicol_offsets = ut.cumsum(ut.get_list_column(multicol_lbls, 1))
for offset in multicol_offsets:
if offset < len(col_align_sep_list):
col_align_sep_list[offset] = multicol_sep # depends on [control=['if'], data=['offset']] # depends on [control=['for'], data=['offset']] # depends on [control=['if'], data=['multicol_lbls']]
from six.moves import zip_longest
_tmp = [ut.filter_Nones(tup) for tup in zip_longest(col_align_list, col_align_sep_list)]
col_layout = ''.join(ut.flatten(_tmp))
#if len(col_align_list) > 1:
# col_layout = col_align_list[0] + rowlblcol_sep + col_sep.join(col_align_list[1:])
#else:
# col_layout = col_sep.join(col_align_list)
tabular_head = '\\begin{tabular}{|%s|}' % col_layout + '\n'
tabular_tail = '\\end{tabular}'
if centerline:
tabular_head = '\\centerline{' + '\n' + tabular_head
tabular_tail = tabular_tail + '}' # depends on [control=['if'], data=[]]
if astable:
#tabular_head = r'\begin{centering}' + '\n' + tabular_head
tabular_head = '\\centering' + '\n' + tabular_head
tabular_head = '\\begin{table}' + table_position + '\n' + tabular_head
lblstr = latex_sanitize_command_name(kwargs.get('label', title))
caption = title
if AUTOFIX_LATEX:
caption = escape_latex(caption) # depends on [control=['if'], data=[]]
caption = '\n% ---\n' + caption + '\n% ---\n'
#tabular_head = r'\end{centering}' + '\n' + tabular_head
tabular_tail = tabular_tail + '\n\\caption[%s]{%s}\n\\label{tbl:%s}\n\\end{table}' % (lblstr, caption, lblstr) # depends on [control=['if'], data=[]]
tabular_str = rowvalsep.join([tabular_head, tabular_body, tabular_tail])
topsep = '\\hline\n' if True else '\\toprule\n'
botsep = '\\hline\n' if True else '\\bottomrule\n'
tabular_str = tabular_head + topsep + tabular_body + botsep + tabular_tail
if common_rowlbl is not None:
#tabular_str += escape_latex('\n\nThe following parameters were held fixed:\n' + common_rowlbl)
pass # depends on [control=['if'], data=[]]
return tabular_str |
def to_string(self, indent):
"""Print with indent."""
ind = indent * ' '
if self.root:
print(ind, self.type, '---', self.root)
else:
print(ind, self.type)
indent = indent + 3
ind = indent * ' '
for children in self.children:
if children is None:
print("OOPS! type of parent is", type(self))
print(self.children)
if isinstance(children, str):
print(ind, children)
elif isinstance(children, int):
print(ind, str(children))
elif isinstance(children, float):
print(ind, str(children))
else:
children.to_string(indent) | def function[to_string, parameter[self, indent]]:
constant[Print with indent.]
variable[ind] assign[=] binary_operation[name[indent] * constant[ ]]
if name[self].root begin[:]
call[name[print], parameter[name[ind], name[self].type, constant[---], name[self].root]]
variable[indent] assign[=] binary_operation[name[indent] + constant[3]]
variable[ind] assign[=] binary_operation[name[indent] * constant[ ]]
for taget[name[children]] in starred[name[self].children] begin[:]
if compare[name[children] is constant[None]] begin[:]
call[name[print], parameter[constant[OOPS! type of parent is], call[name[type], parameter[name[self]]]]]
call[name[print], parameter[name[self].children]]
if call[name[isinstance], parameter[name[children], name[str]]] begin[:]
call[name[print], parameter[name[ind], name[children]]] | keyword[def] identifier[to_string] ( identifier[self] , identifier[indent] ):
literal[string]
identifier[ind] = identifier[indent] * literal[string]
keyword[if] identifier[self] . identifier[root] :
identifier[print] ( identifier[ind] , identifier[self] . identifier[type] , literal[string] , identifier[self] . identifier[root] )
keyword[else] :
identifier[print] ( identifier[ind] , identifier[self] . identifier[type] )
identifier[indent] = identifier[indent] + literal[int]
identifier[ind] = identifier[indent] * literal[string]
keyword[for] identifier[children] keyword[in] identifier[self] . identifier[children] :
keyword[if] identifier[children] keyword[is] keyword[None] :
identifier[print] ( literal[string] , identifier[type] ( identifier[self] ))
identifier[print] ( identifier[self] . identifier[children] )
keyword[if] identifier[isinstance] ( identifier[children] , identifier[str] ):
identifier[print] ( identifier[ind] , identifier[children] )
keyword[elif] identifier[isinstance] ( identifier[children] , identifier[int] ):
identifier[print] ( identifier[ind] , identifier[str] ( identifier[children] ))
keyword[elif] identifier[isinstance] ( identifier[children] , identifier[float] ):
identifier[print] ( identifier[ind] , identifier[str] ( identifier[children] ))
keyword[else] :
identifier[children] . identifier[to_string] ( identifier[indent] ) | def to_string(self, indent):
"""Print with indent."""
ind = indent * ' '
if self.root:
print(ind, self.type, '---', self.root) # depends on [control=['if'], data=[]]
else:
print(ind, self.type)
indent = indent + 3
ind = indent * ' '
for children in self.children:
if children is None:
print('OOPS! type of parent is', type(self))
print(self.children) # depends on [control=['if'], data=[]]
if isinstance(children, str):
print(ind, children) # depends on [control=['if'], data=[]]
elif isinstance(children, int):
print(ind, str(children)) # depends on [control=['if'], data=[]]
elif isinstance(children, float):
print(ind, str(children)) # depends on [control=['if'], data=[]]
else:
children.to_string(indent) # depends on [control=['for'], data=['children']] |
def _rank_genes_groups_plot(adata, plot_type='heatmap', groups=None,
                            n_genes=10, groupby=None, key=None,
                            show=None, save=None, **kwds):
    """\
    Plot a stored gene ranking using the requested plot type.
    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    groups : `str` or `list` of `str`
        The groups for which to show the gene ranking.
    n_genes : `int`, optional (default: 10)
        Number of genes to show.
    groupby : `str` or `None`, optional (default: `None`)
        The key of the observation grouping to consider. By default,
        the groupby is chosen from the rank genes groups parameter but
        other groupby options can be used.
    {show_save_ax}
    """
    if key is None:
        key = 'rank_genes_groups'
    # Draw the dendrogram unless the caller explicitly disabled it.
    kwds.setdefault('dendrogram', True)
    if groupby is None:
        groupby = str(adata.uns[key]['params']['groupby'])
    if groups is None:
        group_names = adata.uns[key]['names'].dtype.names
    else:
        group_names = groups

    gene_names = []
    group_positions = []
    valid_group_names = []
    offset = 0
    for group in group_names:
        # Keep at most `n_genes` non-NaN gene names for this group.
        names_for_group = [gene for gene in adata.uns[key]['names'][group]
                           if not pd.isnull(gene)][:n_genes]
        if not names_for_group:
            logg.warn("No genes found for group {}".format(group))
            continue
        gene_names.extend(names_for_group)
        # Positions are inclusive (start, end) indices into `gene_names`.
        group_positions.append((offset, offset + len(names_for_group) - 1))
        valid_group_names.append(group)
        offset += len(names_for_group)
    group_names = valid_group_names

    # Keyword arguments shared by every underlying plotting function.
    shared = dict(var_group_labels=group_names,
                  var_group_positions=group_positions,
                  show=show, save=save)
    if plot_type == 'dotplot':
        from .._anndata import dotplot
        dotplot(adata, gene_names, groupby, **shared, **kwds)
    elif plot_type == 'heatmap':
        from .._anndata import heatmap
        heatmap(adata, gene_names, groupby, **shared, **kwds)
    elif plot_type == 'stacked_violin':
        from .._anndata import stacked_violin
        return stacked_violin(adata, gene_names, groupby, **shared, **kwds)
    elif plot_type == 'tracksplot':
        from .._anndata import tracksplot
        return tracksplot(adata, gene_names, groupby, **shared, **kwds)
    elif plot_type == 'matrixplot':
        from .._anndata import matrixplot
        matrixplot(adata, gene_names, groupby, **shared, **kwds)
constant[ Plot ranking of genes using the specified plot type
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
groups : `str` or `list` of `str`
The groups for which to show the gene ranking.
n_genes : `int`, optional (default: 10)
Number of genes to show.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider. By default,
the groupby is chosen from the rank genes groups parameter but
other groupby options can be used.
{show_save_ax}
]
if compare[name[key] is constant[None]] begin[:]
variable[key] assign[=] constant[rank_genes_groups]
if compare[constant[dendrogram] <ast.NotIn object at 0x7da2590d7190> name[kwds]] begin[:]
call[name[kwds]][constant[dendrogram]] assign[=] constant[True]
if compare[name[groupby] is constant[None]] begin[:]
variable[groupby] assign[=] call[name[str], parameter[call[call[call[name[adata].uns][name[key]]][constant[params]]][constant[groupby]]]]
variable[group_names] assign[=] <ast.IfExp object at 0x7da1b23471f0>
variable[gene_names] assign[=] list[[]]
variable[start] assign[=] constant[0]
variable[group_positions] assign[=] list[[]]
variable[group_names_valid] assign[=] list[[]]
for taget[name[group]] in starred[name[group_names]] begin[:]
variable[genes_list] assign[=] call[<ast.ListComp object at 0x7da18f720a90>][<ast.Slice object at 0x7da18f7213f0>]
if compare[call[name[len], parameter[name[genes_list]]] equal[==] constant[0]] begin[:]
call[name[logg].warn, parameter[call[constant[No genes found for group {}].format, parameter[name[group]]]]]
continue
call[name[gene_names].extend, parameter[name[genes_list]]]
variable[end] assign[=] binary_operation[name[start] + call[name[len], parameter[name[genes_list]]]]
call[name[group_positions].append, parameter[tuple[[<ast.Name object at 0x7da18f7203a0>, <ast.BinOp object at 0x7da18f720fd0>]]]]
call[name[group_names_valid].append, parameter[name[group]]]
variable[start] assign[=] name[end]
variable[group_names] assign[=] name[group_names_valid]
if compare[name[plot_type] equal[==] constant[dotplot]] begin[:]
from relative_module[_anndata] import module[dotplot]
call[name[dotplot], parameter[name[adata], name[gene_names], name[groupby]]] | keyword[def] identifier[_rank_genes_groups_plot] ( identifier[adata] , identifier[plot_type] = literal[string] , identifier[groups] = keyword[None] ,
identifier[n_genes] = literal[int] , identifier[groupby] = keyword[None] , identifier[key] = keyword[None] ,
identifier[show] = keyword[None] , identifier[save] = keyword[None] ,** identifier[kwds] ):
literal[string]
keyword[if] identifier[key] keyword[is] keyword[None] :
identifier[key] = literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwds] :
identifier[kwds] [ literal[string] ]= keyword[True]
keyword[if] identifier[groupby] keyword[is] keyword[None] :
identifier[groupby] = identifier[str] ( identifier[adata] . identifier[uns] [ identifier[key] ][ literal[string] ][ literal[string] ])
identifier[group_names] =( identifier[adata] . identifier[uns] [ identifier[key] ][ literal[string] ]. identifier[dtype] . identifier[names]
keyword[if] identifier[groups] keyword[is] keyword[None] keyword[else] identifier[groups] )
identifier[gene_names] =[]
identifier[start] = literal[int]
identifier[group_positions] =[]
identifier[group_names_valid] =[]
keyword[for] identifier[group] keyword[in] identifier[group_names] :
identifier[genes_list] =[ identifier[gene] keyword[for] identifier[gene] keyword[in] identifier[adata] . identifier[uns] [ identifier[key] ][ literal[string] ][ identifier[group] ] keyword[if] keyword[not] identifier[pd] . identifier[isnull] ( identifier[gene] )][: identifier[n_genes] ]
keyword[if] identifier[len] ( identifier[genes_list] )== literal[int] :
identifier[logg] . identifier[warn] ( literal[string] . identifier[format] ( identifier[group] ))
keyword[continue]
identifier[gene_names] . identifier[extend] ( identifier[genes_list] )
identifier[end] = identifier[start] + identifier[len] ( identifier[genes_list] )
identifier[group_positions] . identifier[append] (( identifier[start] , identifier[end] - literal[int] ))
identifier[group_names_valid] . identifier[append] ( identifier[group] )
identifier[start] = identifier[end]
identifier[group_names] = identifier[group_names_valid]
keyword[if] identifier[plot_type] == literal[string] :
keyword[from] .. identifier[_anndata] keyword[import] identifier[dotplot]
identifier[dotplot] ( identifier[adata] , identifier[gene_names] , identifier[groupby] , identifier[var_group_labels] = identifier[group_names] ,
identifier[var_group_positions] = identifier[group_positions] , identifier[show] = identifier[show] , identifier[save] = identifier[save] ,** identifier[kwds] )
keyword[elif] identifier[plot_type] == literal[string] :
keyword[from] .. identifier[_anndata] keyword[import] identifier[heatmap]
identifier[heatmap] ( identifier[adata] , identifier[gene_names] , identifier[groupby] , identifier[var_group_labels] = identifier[group_names] ,
identifier[var_group_positions] = identifier[group_positions] , identifier[show] = identifier[show] , identifier[save] = identifier[save] ,** identifier[kwds] )
keyword[elif] identifier[plot_type] == literal[string] :
keyword[from] .. identifier[_anndata] keyword[import] identifier[stacked_violin]
keyword[return] identifier[stacked_violin] ( identifier[adata] , identifier[gene_names] , identifier[groupby] , identifier[var_group_labels] = identifier[group_names] ,
identifier[var_group_positions] = identifier[group_positions] , identifier[show] = identifier[show] , identifier[save] = identifier[save] ,** identifier[kwds] )
keyword[elif] identifier[plot_type] == literal[string] :
keyword[from] .. identifier[_anndata] keyword[import] identifier[tracksplot]
keyword[return] identifier[tracksplot] ( identifier[adata] , identifier[gene_names] , identifier[groupby] , identifier[var_group_labels] = identifier[group_names] ,
identifier[var_group_positions] = identifier[group_positions] , identifier[show] = identifier[show] , identifier[save] = identifier[save] ,** identifier[kwds] )
keyword[elif] identifier[plot_type] == literal[string] :
keyword[from] .. identifier[_anndata] keyword[import] identifier[matrixplot]
identifier[matrixplot] ( identifier[adata] , identifier[gene_names] , identifier[groupby] , identifier[var_group_labels] = identifier[group_names] ,
identifier[var_group_positions] = identifier[group_positions] , identifier[show] = identifier[show] , identifier[save] = identifier[save] ,** identifier[kwds] ) | def _rank_genes_groups_plot(adata, plot_type='heatmap', groups=None, n_genes=10, groupby=None, key=None, show=None, save=None, **kwds):
""" Plot ranking of genes using the specified plot type
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
groups : `str` or `list` of `str`
The groups for which to show the gene ranking.
n_genes : `int`, optional (default: 10)
Number of genes to show.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider. By default,
the groupby is chosen from the rank genes groups parameter but
other groupby options can be used.
{show_save_ax}
"""
if key is None:
key = 'rank_genes_groups' # depends on [control=['if'], data=['key']]
if 'dendrogram' not in kwds:
kwds['dendrogram'] = True # depends on [control=['if'], data=['kwds']]
if groupby is None:
groupby = str(adata.uns[key]['params']['groupby']) # depends on [control=['if'], data=['groupby']]
group_names = adata.uns[key]['names'].dtype.names if groups is None else groups
gene_names = []
start = 0
group_positions = []
group_names_valid = []
for group in group_names:
# get all genes that are 'not-nan'
genes_list = [gene for gene in adata.uns[key]['names'][group] if not pd.isnull(gene)][:n_genes]
if len(genes_list) == 0:
logg.warn('No genes found for group {}'.format(group))
continue # depends on [control=['if'], data=[]]
gene_names.extend(genes_list)
end = start + len(genes_list)
group_positions.append((start, end - 1))
group_names_valid.append(group)
start = end # depends on [control=['for'], data=['group']]
group_names = group_names_valid
if plot_type == 'dotplot':
from .._anndata import dotplot
dotplot(adata, gene_names, groupby, var_group_labels=group_names, var_group_positions=group_positions, show=show, save=save, **kwds) # depends on [control=['if'], data=[]]
elif plot_type == 'heatmap':
from .._anndata import heatmap
heatmap(adata, gene_names, groupby, var_group_labels=group_names, var_group_positions=group_positions, show=show, save=save, **kwds) # depends on [control=['if'], data=[]]
elif plot_type == 'stacked_violin':
from .._anndata import stacked_violin
return stacked_violin(adata, gene_names, groupby, var_group_labels=group_names, var_group_positions=group_positions, show=show, save=save, **kwds) # depends on [control=['if'], data=[]]
elif plot_type == 'tracksplot':
from .._anndata import tracksplot
return tracksplot(adata, gene_names, groupby, var_group_labels=group_names, var_group_positions=group_positions, show=show, save=save, **kwds) # depends on [control=['if'], data=[]]
elif plot_type == 'matrixplot':
from .._anndata import matrixplot
matrixplot(adata, gene_names, groupby, var_group_labels=group_names, var_group_positions=group_positions, show=show, save=save, **kwds) # depends on [control=['if'], data=[]] |
def _construct(self):
    """
    Build the control dependence graph.
    The implementation follows figure 6 of "An Efficient Method of Computing
    Static Single Assignment Form" by Ron Cytron et al.
    """
    self._acyclic_cfg = self._cfg.copy()
    # TODO: Cycle-removing is not needed - confirm it later
    # The CFG we use should be acyclic!
    #self._acyclic_cfg.remove_cycles()

    # Normalize the acyclic CFG before any dominance computation.
    self._pre_process_cfg()

    # The post-dominator tree is needed for the reversed dominance frontier.
    self._pd_construct()

    self._graph = networkx.DiGraph()

    # Reversed dominance frontier mapping: node -> frontier nodes.
    rdf = compute_dominance_frontier(self._normalized_cfg, self._post_dom)

    for node in self._cfg.graph.nodes():
        # Nodes absent from the mapping simply contribute no edges.
        for frontier_node in rdf.get(node, ()):
            self._graph.add_edge(frontier_node, node)
constant[
Construct a control dependence graph.
This implementation is based on figure 6 of paper An Efficient Method of Computing Static Single Assignment
Form by Ron Cytron, etc.
]
name[self]._acyclic_cfg assign[=] call[name[self]._cfg.copy, parameter[]]
call[name[self]._pre_process_cfg, parameter[]]
call[name[self]._pd_construct, parameter[]]
name[self]._graph assign[=] call[name[networkx].DiGraph, parameter[]]
variable[rdf] assign[=] call[name[compute_dominance_frontier], parameter[name[self]._normalized_cfg, name[self]._post_dom]]
for taget[name[y]] in starred[call[name[self]._cfg.graph.nodes, parameter[]]] begin[:]
if compare[name[y] <ast.NotIn object at 0x7da2590d7190> name[rdf]] begin[:]
continue
for taget[name[x]] in starred[call[name[rdf]][name[y]]] begin[:]
call[name[self]._graph.add_edge, parameter[name[x], name[y]]] | keyword[def] identifier[_construct] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_acyclic_cfg] = identifier[self] . identifier[_cfg] . identifier[copy] ()
identifier[self] . identifier[_pre_process_cfg] ()
identifier[self] . identifier[_pd_construct] ()
identifier[self] . identifier[_graph] = identifier[networkx] . identifier[DiGraph] ()
identifier[rdf] = identifier[compute_dominance_frontier] ( identifier[self] . identifier[_normalized_cfg] , identifier[self] . identifier[_post_dom] )
keyword[for] identifier[y] keyword[in] identifier[self] . identifier[_cfg] . identifier[graph] . identifier[nodes] ():
keyword[if] identifier[y] keyword[not] keyword[in] identifier[rdf] :
keyword[continue]
keyword[for] identifier[x] keyword[in] identifier[rdf] [ identifier[y] ]:
identifier[self] . identifier[_graph] . identifier[add_edge] ( identifier[x] , identifier[y] ) | def _construct(self):
"""
Construct a control dependence graph.
This implementation is based on figure 6 of paper An Efficient Method of Computing Static Single Assignment
Form by Ron Cytron, etc.
"""
self._acyclic_cfg = self._cfg.copy()
# TODO: Cycle-removing is not needed - confirm it later
# The CFG we use should be acyclic!
#self._acyclic_cfg.remove_cycles()
# Pre-process the acyclic CFG
self._pre_process_cfg()
# Construct post-dominator tree
self._pd_construct()
self._graph = networkx.DiGraph()
# Construct the reversed dominance frontier mapping
rdf = compute_dominance_frontier(self._normalized_cfg, self._post_dom)
for y in self._cfg.graph.nodes():
if y not in rdf:
continue # depends on [control=['if'], data=[]]
for x in rdf[y]:
self._graph.add_edge(x, y) # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['y']] |
def get_version(file, name='__version__'):
    """Execute the module at *file* and return the value bound to *name*.

    Typically used to pull ``__version__`` out of a package's version
    module without importing the package itself.
    """
    real_path = os.path.realpath(file)
    with io.open(real_path, encoding="utf8") as handle:
        source = handle.read()
    # Run the file in an isolated namespace and pick out the wanted name.
    namespace = {}
    exec(source, {}, namespace)
    return namespace[name]
constant[Get the version of the package from the given file by
executing it and extracting the given `name`.
]
variable[path] assign[=] call[name[os].path.realpath, parameter[name[file]]]
variable[version_ns] assign[=] dictionary[[], []]
with call[name[io].open, parameter[name[path]]] begin[:]
call[name[exec], parameter[call[name[f].read, parameter[]], dictionary[[], []], name[version_ns]]]
return[call[name[version_ns]][name[name]]] | keyword[def] identifier[get_version] ( identifier[file] , identifier[name] = literal[string] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[file] )
identifier[version_ns] ={}
keyword[with] identifier[io] . identifier[open] ( identifier[path] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[exec] ( identifier[f] . identifier[read] (),{}, identifier[version_ns] )
keyword[return] identifier[version_ns] [ identifier[name] ] | def get_version(file, name='__version__'):
"""Get the version of the package from the given file by
executing it and extracting the given `name`.
"""
path = os.path.realpath(file)
version_ns = {}
with io.open(path, encoding='utf8') as f:
exec(f.read(), {}, version_ns) # depends on [control=['with'], data=['f']]
return version_ns[name] |
def Overlay_setShowDebugBorders(self, show):
    """Request that the backend show debug borders on layers.

    Function path: Overlay.setShowDebugBorders (domain: Overlay,
    method name: setShowDebugBorders).

    Required arguments:
        'show' (type: boolean) -> True for showing debug borders

    Returns the result of the synchronous command dispatch.
    """
    # Validate the argument type up front, mirroring the protocol schema.
    assert isinstance(show, (bool,)), (
        "Argument 'show' must be of type '['bool']'. Received type: '%s'"
        % type(show))
    return self.synchronous_command('Overlay.setShowDebugBorders', show=show)
constant[
Function path: Overlay.setShowDebugBorders
Domain: Overlay
Method name: setShowDebugBorders
Parameters:
Required arguments:
'show' (type: boolean) -> True for showing debug borders
No return value.
Description: Requests that backend shows debug borders on layers
]
assert[call[name[isinstance], parameter[name[show], tuple[[<ast.Name object at 0x7da1b1028bb0>]]]]]
variable[subdom_funcs] assign[=] call[name[self].synchronous_command, parameter[constant[Overlay.setShowDebugBorders]]]
return[name[subdom_funcs]] | keyword[def] identifier[Overlay_setShowDebugBorders] ( identifier[self] , identifier[show] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[show] ,( identifier[bool] ,)
), literal[string] % identifier[type] (
identifier[show] )
identifier[subdom_funcs] = identifier[self] . identifier[synchronous_command] ( literal[string] ,
identifier[show] = identifier[show] )
keyword[return] identifier[subdom_funcs] | def Overlay_setShowDebugBorders(self, show):
"""
Function path: Overlay.setShowDebugBorders
Domain: Overlay
Method name: setShowDebugBorders
Parameters:
Required arguments:
'show' (type: boolean) -> True for showing debug borders
No return value.
Description: Requests that backend shows debug borders on layers
"""
assert isinstance(show, (bool,)), "Argument 'show' must be of type '['bool']'. Received type: '%s'" % type(show)
subdom_funcs = self.synchronous_command('Overlay.setShowDebugBorders', show=show)
return subdom_funcs |
def filepath(self, value):
    """Setter for the current filepath.

    For writable databases, switching to a different path first releases
    any lock held on the previous file and then acquires a lock on the
    new one (when the new path is not None). Read-only databases and
    unchanged paths are assigned without any locking activity.
    """
    # Read-only, or same path as before: nothing to lock or unlock.
    if self.readonly or self._filepath == value:
        self._filepath = value
        return
    if self._locked:
        self.log.debug("Releasing previously-held lock file: {0}".format(self.lockfile))
        # Release the lock on previous filepath.
        self.release_lock()
    self._filepath = value
    if self._filepath is not None:
        self.acquire_lock()
constant[ Property for setting current filepath, automatically takes out lock on new file if not readonly db. ]
if <ast.BoolOp object at 0x7da18f00c370> begin[:]
if name[self]._locked begin[:]
call[name[self].log.debug, parameter[call[constant[Releasing previously-held lock file: {0}].format, parameter[name[self].lockfile]]]]
call[name[self].release_lock, parameter[]]
name[self]._filepath assign[=] name[value]
if compare[name[self]._filepath is_not constant[None]] begin[:]
call[name[self].acquire_lock, parameter[]] | keyword[def] identifier[filepath] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[readonly] keyword[and] identifier[self] . identifier[_filepath] != identifier[value] :
keyword[if] identifier[self] . identifier[_locked] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[lockfile] ))
identifier[self] . identifier[release_lock] ()
identifier[self] . identifier[_filepath] = identifier[value]
keyword[if] identifier[self] . identifier[_filepath] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[acquire_lock] ()
keyword[else] :
identifier[self] . identifier[_filepath] = identifier[value] | def filepath(self, value):
""" Property for setting current filepath, automatically takes out lock on new file if not readonly db. """
if not self.readonly and self._filepath != value:
if self._locked:
self.log.debug('Releasing previously-held lock file: {0}'.format(self.lockfile))
# Release the lock on previous filepath.
self.release_lock() # depends on [control=['if'], data=[]]
self._filepath = value
if self._filepath is not None:
self.acquire_lock() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
self._filepath = value |
def bin_annotation(annotation=None, subsampling_factor=3):
    """Subsample genome annotations such as contig information or bin
    positions.

    Keeps every ``subsampling_factor``-th entry (starting at index 0);
    a missing or empty annotation yields a single zero entry.
    """
    if annotation is None:
        annotation = np.array([])
    kept = [value for index, value in enumerate(annotation)
            if index % subsampling_factor == 0]
    if not kept:
        # Guarantee at least one position so downstream code has data.
        kept.append(0)
    return np.array(kept)
constant[Perform binning on genome annotations such as contig information or bin
positions.
]
if compare[name[annotation] is constant[None]] begin[:]
variable[annotation] assign[=] call[name[np].array, parameter[list[[]]]]
variable[n] assign[=] call[name[len], parameter[name[annotation]]]
variable[binned_positions] assign[=] <ast.ListComp object at 0x7da1b23807f0>
if compare[call[name[len], parameter[name[binned_positions]]] equal[==] constant[0]] begin[:]
call[name[binned_positions].append, parameter[constant[0]]]
return[call[name[np].array, parameter[name[binned_positions]]]] | keyword[def] identifier[bin_annotation] ( identifier[annotation] = keyword[None] , identifier[subsampling_factor] = literal[int] ):
literal[string]
keyword[if] identifier[annotation] keyword[is] keyword[None] :
identifier[annotation] = identifier[np] . identifier[array] ([])
identifier[n] = identifier[len] ( identifier[annotation] )
identifier[binned_positions] =[ identifier[annotation] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ) keyword[if]
identifier[i] % identifier[subsampling_factor] == literal[int] ]
keyword[if] identifier[len] ( identifier[binned_positions] )== literal[int] :
identifier[binned_positions] . identifier[append] ( literal[int] )
keyword[return] identifier[np] . identifier[array] ( identifier[binned_positions] ) | def bin_annotation(annotation=None, subsampling_factor=3):
"""Perform binning on genome annotations such as contig information or bin
positions.
"""
if annotation is None:
annotation = np.array([]) # depends on [control=['if'], data=['annotation']]
n = len(annotation)
binned_positions = [annotation[i] for i in range(n) if i % subsampling_factor == 0]
if len(binned_positions) == 0:
binned_positions.append(0) # depends on [control=['if'], data=[]]
return np.array(binned_positions) |
def lines_by_attribute(self, attr_val=None, attr='type'):
    """Yield the graph's lines filtered by an edge attribute value.

    Each edge of the graph is described by its two adjacent nodes plus a
    line object (stored under the edge key ``'line'``) that carries the
    relevant power system parameters. This generator collects every edge
    whose attribute `attr` (``'type'`` by default) equals `attr_val` —
    or all edges carrying that attribute when no value is given — and
    yields them one by one.

    Examples
    --------
    >>> import edisgo
    >>> G = edisgo.grids.Graph()
    >>> G.add_node(1, type='generator')
    >>> G.add_node(2, type='load')
    >>> G.add_edge(1, 2, type='line')
    >>> lines = G.lines_by_attribute('line')
    >>> list(lines)[0]
    <class 'tuple'>: ((node1, node2), line)

    Parameters
    ----------
    attr_val: str
        Value of the `attr` lines should be selected by
    attr: str, default: 'type'
        Attribute key which is 'type' by default

    Returns
    -------
    Generator of :obj:`dict`
        Dictionaries with keys ``'adj_nodes'`` (the edge's node pair)
        and ``'line'`` (the line object).

    Notes
    -----
    NetworkX exposes generator functions for nodes (`Graph.nodes()`) and
    edges (`Graph.edges()`), but unlike graph nodes, which can be
    represented by objects, branch objects can only be reached through
    an edge attribute ('line' is used here). Yielding a dictionary keeps
    access to adjacent nodes and the line object simple for callers.
    The iteration order is made deterministic by sorting on the line
    objects' ``repr``.

    Adapted from `Dingo <https://github.com/openego/dingo/blob/\
    ee237e37d4c228081e1e246d7e6d0d431c6dda9e/dingo/core/network/\
    __init__.py>`_.
    """
    # All edges that carry the requested attribute at all.
    edge_attrs = nx.get_edge_attributes(self, attr).items()
    if attr_val:
        # Restrict to edges whose attribute matches the requested value.
        selected = [(nodes, self[nodes[0]][nodes[1]]['line'])
                    for nodes, value in edge_attrs if value == attr_val]
    else:
        selected = [(nodes, self[nodes[0]][nodes[1]]['line'])
                    for nodes, value in edge_attrs]
    # Sort on repr(line) so output does not depend on dict ordering.
    for adj_nodes, line in sorted(selected, key=lambda item: repr(item[1])):
        yield {'adj_nodes': adj_nodes, 'line': line}
constant[ Returns a generator for iterating over Graph's lines by attribute value.
Get all lines that share the same attribute. By default, the attr 'type'
is used to specify the lines' type (line, agg_line, etc.).
The edge of a graph is described by the two adjacent nodes and the line
object itself. Whereas the line object is used to hold all relevant
power system parameters.
Examples
--------
>>> import edisgo
>>> G = edisgo.grids.Graph()
>>> G.add_node(1, type='generator')
>>> G.add_node(2, type='load')
>>> G.add_edge(1, 2, type='line')
>>> lines = G.lines_by_attribute('line')
>>> list(lines)[0]
<class 'tuple'>: ((node1, node2), line)
Parameters
----------
attr_val: str
Value of the `attr` lines should be selected by
attr: str, default: 'type'
Attribute key which is 'type' by default
Returns
-------
Generator of :obj:`dict`
A list containing line elements that match the given attribute
value
Notes
-----
There are generator functions for nodes (`Graph.nodes()`) and edges
(`Graph.edges()`) in NetworkX but unlike graph nodes, which can be
represented by objects, branch objects can only be accessed by using an
edge attribute ('line' is used here)
To make access to attributes of the line objects simpler and more
intuitive for the user, this generator yields a dictionary for each edge
that contains information about adjacent nodes and the line object.
Note, the construction of the dictionary highly depends on the structure
of the in-going tuple (which is defined by the needs of networkX). If
this changes, the code will break.
Adapted from `Dingo <https://github.com/openego/dingo/blob/ ee237e37d4c228081e1e246d7e6d0d431c6dda9e/dingo/core/network/ __init__.py>`_.
]
variable[lines_attributes] assign[=] call[call[name[nx].get_edge_attributes, parameter[name[self], name[attr]]].items, parameter[]]
if name[attr_val] begin[:]
variable[lines_attributes] assign[=] <ast.ListComp object at 0x7da1b03cbee0>
variable[lines_sorted] assign[=] call[name[sorted], parameter[call[name[list], parameter[name[lines_attributes]]]]]
for taget[name[line]] in starred[name[lines_sorted]] begin[:]
<ast.Yield object at 0x7da1b036a800> | keyword[def] identifier[lines_by_attribute] ( identifier[self] , identifier[attr_val] = keyword[None] , identifier[attr] = literal[string] ):
literal[string]
identifier[lines_attributes] = identifier[nx] . identifier[get_edge_attributes] ( identifier[self] , identifier[attr] ). identifier[items] ()
keyword[if] identifier[attr_val] :
identifier[lines_attributes] =[( identifier[k] , identifier[self] [ identifier[k] [ literal[int] ]][ identifier[k] [ literal[int] ]][ literal[string] ])
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[lines_attributes] keyword[if] identifier[v] == identifier[attr_val] ]
keyword[else] :
identifier[lines_attributes] =[( identifier[k] , identifier[self] [ identifier[k] [ literal[int] ]][ identifier[k] [ literal[int] ]][ literal[string] ])
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[lines_attributes] ]
identifier[lines_sorted] = identifier[sorted] ( identifier[list] ( identifier[lines_attributes] ), identifier[key] = keyword[lambda] identifier[_] : identifier[repr] ( identifier[_] [ literal[int] ]))
keyword[for] identifier[line] keyword[in] identifier[lines_sorted] :
keyword[yield] { literal[string] : identifier[line] [ literal[int] ], literal[string] : identifier[line] [ literal[int] ]} | def lines_by_attribute(self, attr_val=None, attr='type'):
""" Returns a generator for iterating over Graph's lines by attribute value.
Get all lines that share the same attribute. By default, the attr 'type'
is used to specify the lines' type (line, agg_line, etc.).
The edge of a graph is described by the two adjacent nodes and the line
object itself. Whereas the line object is used to hold all relevant
power system parameters.
Examples
--------
>>> import edisgo
>>> G = edisgo.grids.Graph()
>>> G.add_node(1, type='generator')
>>> G.add_node(2, type='load')
>>> G.add_edge(1, 2, type='line')
>>> lines = G.lines_by_attribute('line')
>>> list(lines)[0]
<class 'tuple'>: ((node1, node2), line)
Parameters
----------
attr_val: str
Value of the `attr` lines should be selected by
attr: str, default: 'type'
Attribute key which is 'type' by default
Returns
-------
Generator of :obj:`dict`
A list containing line elements that match the given attribute
value
Notes
-----
There are generator functions for nodes (`Graph.nodes()`) and edges
(`Graph.edges()`) in NetworkX but unlike graph nodes, which can be
represented by objects, branch objects can only be accessed by using an
edge attribute ('line' is used here)
To make access to attributes of the line objects simpler and more
intuitive for the user, this generator yields a dictionary for each edge
that contains information about adjacent nodes and the line object.
Note, the construction of the dictionary highly depends on the structure
of the in-going tuple (which is defined by the needs of networkX). If
this changes, the code will break.
Adapted from `Dingo <https://github.com/openego/dingo/blob/ ee237e37d4c228081e1e246d7e6d0d431c6dda9e/dingo/core/network/ __init__.py>`_.
"""
# get all lines that have the attribute 'type' set
lines_attributes = nx.get_edge_attributes(self, attr).items()
# attribute value provided?
if attr_val:
# extract lines where 'type' == attr_val
lines_attributes = [(k, self[k[0]][k[1]]['line']) for (k, v) in lines_attributes if v == attr_val] # depends on [control=['if'], data=[]]
else:
# get all lines
lines_attributes = [(k, self[k[0]][k[1]]['line']) for (k, v) in lines_attributes]
# sort them according to connected nodes
lines_sorted = sorted(list(lines_attributes), key=lambda _: repr(_[1]))
for line in lines_sorted:
yield {'adj_nodes': line[0], 'line': line[1]} # depends on [control=['for'], data=['line']] |
def read_namespaced_pod_status(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_pod_status # noqa: E501
read status of the specified Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | def function[read_namespaced_pod_status, parameter[self, name, namespace]]:
constant[read_namespaced_pod_status # noqa: E501
read status of the specified Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].read_namespaced_pod_status_with_http_info, parameter[name[name], name[namespace]]]] | keyword[def] identifier[read_namespaced_pod_status] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[read_namespaced_pod_status_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[read_namespaced_pod_status_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def read_namespaced_pod_status(self, name, namespace, **kwargs): # noqa: E501
"read_namespaced_pod_status # noqa: E501\n\n read status of the specified Pod # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.read_namespaced_pod_status(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the Pod (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :return: V1Pod\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
def _batch_postgres_query(table, records):
"""Break the list into chunks that can be processed as a single statement.
Postgres query cannot be too long or it will fail.
See: https://dba.stackexchange.com/questions/131399/is-there-a-maximum-
length-constraint-for-a-postgres-query
:param records: The full list of records to batch.
:type records: iterable
:param table: The sqlalchemy table.
:return: A generator of lists of records.
"""
if not records:
return
POSTGRESQL_MAX = 0x3FFFFFFF
# Create preamble and measure its length
preamble = (
"INSERT INTO "
+ table.__tablename__
+ " ("
+ ", ".join(records[0].keys())
+ ") VALUES ("
+ ", ".join(["?"] * len(records[0].keys()))
+ ")\n"
)
start = 0
end = 0
total_len = len(preamble)
while end < len(records):
record_len = sum([len(str(v)) for v in records[end].values()])
# Pre-increment to include the end element in the slice
end += 1
if total_len + record_len >= POSTGRESQL_MAX:
logger.debug(f"Splitting query due to length ({total_len} chars).")
yield records[start:end]
start = end
# Reset the total query length
total_len = len(preamble)
else:
total_len += record_len
yield records[start:end] | def function[_batch_postgres_query, parameter[table, records]]:
constant[Break the list into chunks that can be processed as a single statement.
Postgres query cannot be too long or it will fail.
See: https://dba.stackexchange.com/questions/131399/is-there-a-maximum-
length-constraint-for-a-postgres-query
:param records: The full list of records to batch.
:type records: iterable
:param table: The sqlalchemy table.
:return: A generator of lists of records.
]
if <ast.UnaryOp object at 0x7da1b26ac850> begin[:]
return[None]
variable[POSTGRESQL_MAX] assign[=] constant[1073741823]
variable[preamble] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[INSERT INTO ] + name[table].__tablename__] + constant[ (]] + call[constant[, ].join, parameter[call[call[name[records]][constant[0]].keys, parameter[]]]]] + constant[) VALUES (]] + call[constant[, ].join, parameter[binary_operation[list[[<ast.Constant object at 0x7da2047e8fa0>]] * call[name[len], parameter[call[call[name[records]][constant[0]].keys, parameter[]]]]]]]] + constant[)
]]
variable[start] assign[=] constant[0]
variable[end] assign[=] constant[0]
variable[total_len] assign[=] call[name[len], parameter[name[preamble]]]
while compare[name[end] less[<] call[name[len], parameter[name[records]]]] begin[:]
variable[record_len] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2047e9270>]]
<ast.AugAssign object at 0x7da2047e8fd0>
if compare[binary_operation[name[total_len] + name[record_len]] greater_or_equal[>=] name[POSTGRESQL_MAX]] begin[:]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da2047ea710>]]
<ast.Yield object at 0x7da2047ea350>
variable[start] assign[=] name[end]
variable[total_len] assign[=] call[name[len], parameter[name[preamble]]]
<ast.Yield object at 0x7da2047ead70> | keyword[def] identifier[_batch_postgres_query] ( identifier[table] , identifier[records] ):
literal[string]
keyword[if] keyword[not] identifier[records] :
keyword[return]
identifier[POSTGRESQL_MAX] = literal[int]
identifier[preamble] =(
literal[string]
+ identifier[table] . identifier[__tablename__]
+ literal[string]
+ literal[string] . identifier[join] ( identifier[records] [ literal[int] ]. identifier[keys] ())
+ literal[string]
+ literal[string] . identifier[join] ([ literal[string] ]* identifier[len] ( identifier[records] [ literal[int] ]. identifier[keys] ()))
+ literal[string]
)
identifier[start] = literal[int]
identifier[end] = literal[int]
identifier[total_len] = identifier[len] ( identifier[preamble] )
keyword[while] identifier[end] < identifier[len] ( identifier[records] ):
identifier[record_len] = identifier[sum] ([ identifier[len] ( identifier[str] ( identifier[v] )) keyword[for] identifier[v] keyword[in] identifier[records] [ identifier[end] ]. identifier[values] ()])
identifier[end] += literal[int]
keyword[if] identifier[total_len] + identifier[record_len] >= identifier[POSTGRESQL_MAX] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[yield] identifier[records] [ identifier[start] : identifier[end] ]
identifier[start] = identifier[end]
identifier[total_len] = identifier[len] ( identifier[preamble] )
keyword[else] :
identifier[total_len] += identifier[record_len]
keyword[yield] identifier[records] [ identifier[start] : identifier[end] ] | def _batch_postgres_query(table, records):
"""Break the list into chunks that can be processed as a single statement.
Postgres query cannot be too long or it will fail.
See: https://dba.stackexchange.com/questions/131399/is-there-a-maximum-
length-constraint-for-a-postgres-query
:param records: The full list of records to batch.
:type records: iterable
:param table: The sqlalchemy table.
:return: A generator of lists of records.
"""
if not records:
return # depends on [control=['if'], data=[]]
POSTGRESQL_MAX = 1073741823
# Create preamble and measure its length
preamble = 'INSERT INTO ' + table.__tablename__ + ' (' + ', '.join(records[0].keys()) + ') VALUES (' + ', '.join(['?'] * len(records[0].keys())) + ')\n'
start = 0
end = 0
total_len = len(preamble)
while end < len(records):
record_len = sum([len(str(v)) for v in records[end].values()])
# Pre-increment to include the end element in the slice
end += 1
if total_len + record_len >= POSTGRESQL_MAX:
logger.debug(f'Splitting query due to length ({total_len} chars).')
yield records[start:end]
start = end
# Reset the total query length
total_len = len(preamble) # depends on [control=['if'], data=[]]
else:
total_len += record_len # depends on [control=['while'], data=['end']]
yield records[start:end] |
def _connect(self, config):
"""Establish a connection with a MySQL database."""
if 'connection_timeout' not in self._config:
self._config['connection_timeout'] = 480
try:
self._cnx = connect(**config)
self._cursor = self._cnx.cursor()
self._printer('\tMySQL DB connection established with db', config['database'])
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
raise err | def function[_connect, parameter[self, config]]:
constant[Establish a connection with a MySQL database.]
if compare[constant[connection_timeout] <ast.NotIn object at 0x7da2590d7190> name[self]._config] begin[:]
call[name[self]._config][constant[connection_timeout]] assign[=] constant[480]
<ast.Try object at 0x7da1b0a87d30> | keyword[def] identifier[_connect] ( identifier[self] , identifier[config] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_config] :
identifier[self] . identifier[_config] [ literal[string] ]= literal[int]
keyword[try] :
identifier[self] . identifier[_cnx] = identifier[connect] (** identifier[config] )
identifier[self] . identifier[_cursor] = identifier[self] . identifier[_cnx] . identifier[cursor] ()
identifier[self] . identifier[_printer] ( literal[string] , identifier[config] [ literal[string] ])
keyword[except] identifier[Error] keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[errno] == identifier[errorcode] . identifier[ER_ACCESS_DENIED_ERROR] :
identifier[print] ( literal[string] )
keyword[elif] identifier[err] . identifier[errno] == identifier[errorcode] . identifier[ER_BAD_DB_ERROR] :
identifier[print] ( literal[string] )
keyword[raise] identifier[err] | def _connect(self, config):
"""Establish a connection with a MySQL database."""
if 'connection_timeout' not in self._config:
self._config['connection_timeout'] = 480 # depends on [control=['if'], data=[]]
try:
self._cnx = connect(**config)
self._cursor = self._cnx.cursor()
self._printer('\tMySQL DB connection established with db', config['database']) # depends on [control=['try'], data=[]]
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print('Something is wrong with your user name or password') # depends on [control=['if'], data=[]]
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print('Database does not exist') # depends on [control=['if'], data=[]]
raise err # depends on [control=['except'], data=['err']] |
def saveAs(self, path):
"""
save to file under given name
"""
if not path:
path = self._dialogs.getSaveFileName(filter='*.csv')
if path:
self._setPath(path)
with open(str(self._path), 'wb') as stream:
writer = csv.writer(stream)
table = self.table()
for row in table:
writer.writerow(row) | def function[saveAs, parameter[self, path]]:
constant[
save to file under given name
]
if <ast.UnaryOp object at 0x7da1b0a1d7e0> begin[:]
variable[path] assign[=] call[name[self]._dialogs.getSaveFileName, parameter[]]
if name[path] begin[:]
call[name[self]._setPath, parameter[name[path]]]
with call[name[open], parameter[call[name[str], parameter[name[self]._path]], constant[wb]]] begin[:]
variable[writer] assign[=] call[name[csv].writer, parameter[name[stream]]]
variable[table] assign[=] call[name[self].table, parameter[]]
for taget[name[row]] in starred[name[table]] begin[:]
call[name[writer].writerow, parameter[name[row]]] | keyword[def] identifier[saveAs] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[path] :
identifier[path] = identifier[self] . identifier[_dialogs] . identifier[getSaveFileName] ( identifier[filter] = literal[string] )
keyword[if] identifier[path] :
identifier[self] . identifier[_setPath] ( identifier[path] )
keyword[with] identifier[open] ( identifier[str] ( identifier[self] . identifier[_path] ), literal[string] ) keyword[as] identifier[stream] :
identifier[writer] = identifier[csv] . identifier[writer] ( identifier[stream] )
identifier[table] = identifier[self] . identifier[table] ()
keyword[for] identifier[row] keyword[in] identifier[table] :
identifier[writer] . identifier[writerow] ( identifier[row] ) | def saveAs(self, path):
"""
save to file under given name
"""
if not path:
path = self._dialogs.getSaveFileName(filter='*.csv') # depends on [control=['if'], data=[]]
if path:
self._setPath(path)
with open(str(self._path), 'wb') as stream:
writer = csv.writer(stream)
table = self.table()
for row in table:
writer.writerow(row) # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['stream']] # depends on [control=['if'], data=[]] |
def clone(self, repo, ref, deps=()):
"""Clone the given url and checkout the specific ref."""
if os.path.isdir(repo):
repo = os.path.abspath(repo)
def clone_strategy(directory):
env = git.no_git_env()
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('remote', 'add', 'origin', repo)
try:
self._shallow_clone(ref, _git_cmd)
except CalledProcessError:
self._complete_clone(ref, _git_cmd)
return self._new_repo(repo, ref, deps, clone_strategy) | def function[clone, parameter[self, repo, ref, deps]]:
constant[Clone the given url and checkout the specific ref.]
if call[name[os].path.isdir, parameter[name[repo]]] begin[:]
variable[repo] assign[=] call[name[os].path.abspath, parameter[name[repo]]]
def function[clone_strategy, parameter[directory]]:
variable[env] assign[=] call[name[git].no_git_env, parameter[]]
def function[_git_cmd, parameter[]]:
call[name[cmd_output], parameter[constant[git], <ast.Starred object at 0x7da18ede4af0>]]
call[name[_git_cmd], parameter[constant[init], constant[.]]]
call[name[_git_cmd], parameter[constant[remote], constant[add], constant[origin], name[repo]]]
<ast.Try object at 0x7da18ede5300>
return[call[name[self]._new_repo, parameter[name[repo], name[ref], name[deps], name[clone_strategy]]]] | keyword[def] identifier[clone] ( identifier[self] , identifier[repo] , identifier[ref] , identifier[deps] =()):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[repo] ):
identifier[repo] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[repo] )
keyword[def] identifier[clone_strategy] ( identifier[directory] ):
identifier[env] = identifier[git] . identifier[no_git_env] ()
keyword[def] identifier[_git_cmd] (* identifier[args] ):
identifier[cmd_output] ( literal[string] ,* identifier[args] , identifier[cwd] = identifier[directory] , identifier[env] = identifier[env] )
identifier[_git_cmd] ( literal[string] , literal[string] )
identifier[_git_cmd] ( literal[string] , literal[string] , literal[string] , identifier[repo] )
keyword[try] :
identifier[self] . identifier[_shallow_clone] ( identifier[ref] , identifier[_git_cmd] )
keyword[except] identifier[CalledProcessError] :
identifier[self] . identifier[_complete_clone] ( identifier[ref] , identifier[_git_cmd] )
keyword[return] identifier[self] . identifier[_new_repo] ( identifier[repo] , identifier[ref] , identifier[deps] , identifier[clone_strategy] ) | def clone(self, repo, ref, deps=()):
"""Clone the given url and checkout the specific ref."""
if os.path.isdir(repo):
repo = os.path.abspath(repo) # depends on [control=['if'], data=[]]
def clone_strategy(directory):
env = git.no_git_env()
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('remote', 'add', 'origin', repo)
try:
self._shallow_clone(ref, _git_cmd) # depends on [control=['try'], data=[]]
except CalledProcessError:
self._complete_clone(ref, _git_cmd) # depends on [control=['except'], data=[]]
return self._new_repo(repo, ref, deps, clone_strategy) |
def create_xml_path(path, **kwargs):
'''
Start a transient domain based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.create_xml_path <path to XML file on the node>
'''
try:
with salt.utils.files.fopen(path, 'r') as fp_:
return create_xml_str(
salt.utils.stringutils.to_unicode(fp_.read()),
**kwargs
)
except (OSError, IOError):
return False | def function[create_xml_path, parameter[path]]:
constant[
Start a transient domain based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.create_xml_path <path to XML file on the node>
]
<ast.Try object at 0x7da207f03700> | keyword[def] identifier[create_xml_path] ( identifier[path] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[path] , literal[string] ) keyword[as] identifier[fp_] :
keyword[return] identifier[create_xml_str] (
identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[fp_] . identifier[read] ()),
** identifier[kwargs]
)
keyword[except] ( identifier[OSError] , identifier[IOError] ):
keyword[return] keyword[False] | def create_xml_path(path, **kwargs):
"""
Start a transient domain based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.create_xml_path <path to XML file on the node>
"""
try:
with salt.utils.files.fopen(path, 'r') as fp_:
return create_xml_str(salt.utils.stringutils.to_unicode(fp_.read()), **kwargs) # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]]
except (OSError, IOError):
return False # depends on [control=['except'], data=[]] |
def run(self, interpreter):
"""Executes the code of the specified module. Deserializes captured
json data.
"""
with utils.ChangeDir(self.dirname):
command_list = ['PYTHONPATH=' + main_dir, interpreter,
self.filename] + list(self.args)
try:
proc = Popen(' '.join(command_list), stdout=PIPE, stderr=PIPE,
shell=True)
stream_data = proc.communicate()
except Exception as e:
logger.error(
"Error {0} while executing extract_dist command.".format(e))
raise ExtractionError
stream_data = [utils.console_to_str(s) for s in stream_data]
if proc.returncode:
logger.error(
"Subprocess failed, stdout: {0[0]}, stderr: {0[1]}".format(
stream_data))
self._result = json.loads(stream_data[0].split(
"extracted json data:\n")[-1].split("\n")[0]) | def function[run, parameter[self, interpreter]]:
constant[Executes the code of the specified module. Deserializes captured
json data.
]
with call[name[utils].ChangeDir, parameter[name[self].dirname]] begin[:]
variable[command_list] assign[=] binary_operation[list[[<ast.BinOp object at 0x7da1b1bf8160>, <ast.Name object at 0x7da1b1bf8190>, <ast.Attribute object at 0x7da1b1bf9660>]] + call[name[list], parameter[name[self].args]]]
<ast.Try object at 0x7da1b1bf91e0>
variable[stream_data] assign[=] <ast.ListComp object at 0x7da1b1bf9690>
if name[proc].returncode begin[:]
call[name[logger].error, parameter[call[constant[Subprocess failed, stdout: {0[0]}, stderr: {0[1]}].format, parameter[name[stream_data]]]]]
name[self]._result assign[=] call[name[json].loads, parameter[call[call[call[call[call[name[stream_data]][constant[0]].split, parameter[constant[extracted json data:
]]]][<ast.UnaryOp object at 0x7da1b1bf8400>].split, parameter[constant[
]]]][constant[0]]]] | keyword[def] identifier[run] ( identifier[self] , identifier[interpreter] ):
literal[string]
keyword[with] identifier[utils] . identifier[ChangeDir] ( identifier[self] . identifier[dirname] ):
identifier[command_list] =[ literal[string] + identifier[main_dir] , identifier[interpreter] ,
identifier[self] . identifier[filename] ]+ identifier[list] ( identifier[self] . identifier[args] )
keyword[try] :
identifier[proc] = identifier[Popen] ( literal[string] . identifier[join] ( identifier[command_list] ), identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[PIPE] ,
identifier[shell] = keyword[True] )
identifier[stream_data] = identifier[proc] . identifier[communicate] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] (
literal[string] . identifier[format] ( identifier[e] ))
keyword[raise] identifier[ExtractionError]
identifier[stream_data] =[ identifier[utils] . identifier[console_to_str] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[stream_data] ]
keyword[if] identifier[proc] . identifier[returncode] :
identifier[logger] . identifier[error] (
literal[string] . identifier[format] (
identifier[stream_data] ))
identifier[self] . identifier[_result] = identifier[json] . identifier[loads] ( identifier[stream_data] [ literal[int] ]. identifier[split] (
literal[string] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]) | def run(self, interpreter):
"""Executes the code of the specified module. Deserializes captured
json data.
"""
with utils.ChangeDir(self.dirname):
command_list = ['PYTHONPATH=' + main_dir, interpreter, self.filename] + list(self.args)
try:
proc = Popen(' '.join(command_list), stdout=PIPE, stderr=PIPE, shell=True)
stream_data = proc.communicate() # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('Error {0} while executing extract_dist command.'.format(e))
raise ExtractionError # depends on [control=['except'], data=['e']]
stream_data = [utils.console_to_str(s) for s in stream_data]
if proc.returncode:
logger.error('Subprocess failed, stdout: {0[0]}, stderr: {0[1]}'.format(stream_data)) # depends on [control=['if'], data=[]]
self._result = json.loads(stream_data[0].split('extracted json data:\n')[-1].split('\n')[0]) # depends on [control=['with'], data=[]] |
def create_client_event(
self,
parent,
client_event,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Report events issued when end user interacts with customer's application
that uses Cloud Talent Solution. You may inspect the created events in
`self service
tools <https://console.cloud.google.com/talent-solution/overview>`__.
`Learn
more <https://cloud.google.com/talent-solution/docs/management-tools>`__
about self service tools.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.EventServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `client_event`:
>>> client_event = {}
>>>
>>> response = client.create_client_event(parent, client_event)
Args:
parent (str): Parent project name.
client_event (Union[dict, ~google.cloud.talent_v4beta1.types.ClientEvent]): Required.
Events issued when end user interacts with customer's application that
uses Cloud Talent Solution.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.ClientEvent`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.ClientEvent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_client_event" not in self._inner_api_calls:
self._inner_api_calls[
"create_client_event"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_client_event,
default_retry=self._method_configs["CreateClientEvent"].retry,
default_timeout=self._method_configs["CreateClientEvent"].timeout,
client_info=self._client_info,
)
request = event_service_pb2.CreateClientEventRequest(
parent=parent, client_event=client_event
)
return self._inner_api_calls["create_client_event"](
request, retry=retry, timeout=timeout, metadata=metadata
) | def function[create_client_event, parameter[self, parent, client_event, retry, timeout, metadata]]:
constant[
Report events issued when end user interacts with customer's application
that uses Cloud Talent Solution. You may inspect the created events in
`self service
tools <https://console.cloud.google.com/talent-solution/overview>`__.
`Learn
more <https://cloud.google.com/talent-solution/docs/management-tools>`__
about self service tools.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.EventServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `client_event`:
>>> client_event = {}
>>>
>>> response = client.create_client_event(parent, client_event)
Args:
parent (str): Parent project name.
client_event (Union[dict, ~google.cloud.talent_v4beta1.types.ClientEvent]): Required.
Events issued when end user interacts with customer's application that
uses Cloud Talent Solution.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.ClientEvent`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.ClientEvent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[create_client_event] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[create_client_event]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.create_client_event]]
variable[request] assign[=] call[name[event_service_pb2].CreateClientEventRequest, parameter[]]
return[call[call[name[self]._inner_api_calls][constant[create_client_event]], parameter[name[request]]]] | keyword[def] identifier[create_client_event] (
identifier[self] ,
identifier[parent] ,
identifier[client_event] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[create_client_event] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[event_service_pb2] . identifier[CreateClientEventRequest] (
identifier[parent] = identifier[parent] , identifier[client_event] = identifier[client_event]
)
keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
) | def create_client_event(self, parent, client_event, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Report events issued when end user interacts with customer's application
that uses Cloud Talent Solution. You may inspect the created events in
`self service
tools <https://console.cloud.google.com/talent-solution/overview>`__.
`Learn
more <https://cloud.google.com/talent-solution/docs/management-tools>`__
about self service tools.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.EventServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `client_event`:
>>> client_event = {}
>>>
>>> response = client.create_client_event(parent, client_event)
Args:
parent (str): Parent project name.
client_event (Union[dict, ~google.cloud.talent_v4beta1.types.ClientEvent]): Required.
Events issued when end user interacts with customer's application that
uses Cloud Talent Solution.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.ClientEvent`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.ClientEvent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_client_event' not in self._inner_api_calls:
self._inner_api_calls['create_client_event'] = google.api_core.gapic_v1.method.wrap_method(self.transport.create_client_event, default_retry=self._method_configs['CreateClientEvent'].retry, default_timeout=self._method_configs['CreateClientEvent'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = event_service_pb2.CreateClientEventRequest(parent=parent, client_event=client_event)
return self._inner_api_calls['create_client_event'](request, retry=retry, timeout=timeout, metadata=metadata) |
def _validate_options(cls, options):
"""Validate the mutually exclusive options.
Return `True` iff only zero or one of `BASE_ERROR_SELECTION_OPTIONS`
was selected.
"""
for opt1, opt2 in \
itertools.permutations(cls.BASE_ERROR_SELECTION_OPTIONS, 2):
if getattr(options, opt1) and getattr(options, opt2):
log.error('Cannot pass both {} and {}. They are '
'mutually exclusive.'.format(opt1, opt2))
return False
if options.convention and options.convention not in conventions:
log.error("Illegal convention '{}'. Possible conventions: {}"
.format(options.convention,
', '.join(conventions.keys())))
return False
return True | def function[_validate_options, parameter[cls, options]]:
constant[Validate the mutually exclusive options.
Return `True` iff only zero or one of `BASE_ERROR_SELECTION_OPTIONS`
was selected.
]
for taget[tuple[[<ast.Name object at 0x7da18dc99b40>, <ast.Name object at 0x7da18dc9a560>]]] in starred[call[name[itertools].permutations, parameter[name[cls].BASE_ERROR_SELECTION_OPTIONS, constant[2]]]] begin[:]
if <ast.BoolOp object at 0x7da18dc9b850> begin[:]
call[name[log].error, parameter[call[constant[Cannot pass both {} and {}. They are mutually exclusive.].format, parameter[name[opt1], name[opt2]]]]]
return[constant[False]]
if <ast.BoolOp object at 0x7da18dc996f0> begin[:]
call[name[log].error, parameter[call[constant[Illegal convention '{}'. Possible conventions: {}].format, parameter[name[options].convention, call[constant[, ].join, parameter[call[name[conventions].keys, parameter[]]]]]]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_validate_options] ( identifier[cls] , identifier[options] ):
literal[string]
keyword[for] identifier[opt1] , identifier[opt2] keyword[in] identifier[itertools] . identifier[permutations] ( identifier[cls] . identifier[BASE_ERROR_SELECTION_OPTIONS] , literal[int] ):
keyword[if] identifier[getattr] ( identifier[options] , identifier[opt1] ) keyword[and] identifier[getattr] ( identifier[options] , identifier[opt2] ):
identifier[log] . identifier[error] ( literal[string]
literal[string] . identifier[format] ( identifier[opt1] , identifier[opt2] ))
keyword[return] keyword[False]
keyword[if] identifier[options] . identifier[convention] keyword[and] identifier[options] . identifier[convention] keyword[not] keyword[in] identifier[conventions] :
identifier[log] . identifier[error] ( literal[string]
. identifier[format] ( identifier[options] . identifier[convention] ,
literal[string] . identifier[join] ( identifier[conventions] . identifier[keys] ())))
keyword[return] keyword[False]
keyword[return] keyword[True] | def _validate_options(cls, options):
"""Validate the mutually exclusive options.
Return `True` iff only zero or one of `BASE_ERROR_SELECTION_OPTIONS`
was selected.
"""
for (opt1, opt2) in itertools.permutations(cls.BASE_ERROR_SELECTION_OPTIONS, 2):
if getattr(options, opt1) and getattr(options, opt2):
log.error('Cannot pass both {} and {}. They are mutually exclusive.'.format(opt1, opt2))
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if options.convention and options.convention not in conventions:
log.error("Illegal convention '{}'. Possible conventions: {}".format(options.convention, ', '.join(conventions.keys())))
return False # depends on [control=['if'], data=[]]
return True |
def append_process(xmldoc, program=None, version=None, cvs_repository=None, cvs_entry_time=None, comment=None, is_online=False, jobid=0, domain=None, ifos=None):
    """
    Append a new row to the process table in xmldoc and return it.

    program, version, cvs_repository, comment, and domain should all be
    strings or unicodes.  cvs_entry_time should be a string or unicode in
    the format "YYYY/MM/DD HH:MM:SS" (or the git_version
    "YYYY-MM-DD HH:MM:SS +0000" form).  is_online should be a boolean,
    jobid an integer.  ifos should be an iterable (set, tuple, etc.) of
    instrument names.

    See also register_to_xmldoc().
    """
    # Locate the process table; create one under the document root when the
    # document does not contain one yet.
    try:
        proctable = lsctables.ProcessTable.get_table(xmldoc)
    except ValueError:
        proctable = lsctables.New(lsctables.ProcessTable)
        xmldoc.childNodes[0].appendChild(proctable)
    proctable.sync_next_id()

    row = proctable.RowType()
    row.program = program
    row.version = version
    row.cvs_repository = cvs_repository
    # FIXME: remove the "" special case when the git versioning business is
    # sorted out
    if cvs_entry_time is not None and cvs_entry_time != "":
        try:
            # Try the git_version timestamp format first ...
            row.cvs_entry_time = _UTCToGPS(time.strptime(cvs_entry_time, "%Y-%m-%d %H:%M:%S +0000"))
        except ValueError:
            # ... and fall back to the legacy CVS format.
            row.cvs_entry_time = _UTCToGPS(time.strptime(cvs_entry_time, "%Y/%m/%d %H:%M:%S"))
    else:
        row.cvs_entry_time = None
    row.comment = comment
    row.is_online = int(is_online)
    row.node = socket.gethostname()
    # get_username() raises KeyError when no username can be determined.
    try:
        row.username = get_username()
    except KeyError:
        row.username = None
    row.unix_procid = os.getpid()
    row.start_time = _UTCToGPS(time.gmtime())
    row.end_time = None
    row.jobid = jobid
    row.domain = domain
    row.instruments = ifos
    row.process_id = proctable.get_next_id()
    proctable.append(row)
    return row
constant[
Add an entry to the process table in xmldoc. program, version,
cvs_repository, comment, and domain should all be strings or
unicodes. cvs_entry_time should be a string or unicode in the
format "YYYY/MM/DD HH:MM:SS". is_online should be a boolean, jobid
an integer. ifos should be an iterable (set, tuple, etc.) of
instrument names.
See also register_to_xmldoc().
]
<ast.Try object at 0x7da18f722500>
call[name[proctable].sync_next_id, parameter[]]
variable[process] assign[=] call[name[proctable].RowType, parameter[]]
name[process].program assign[=] name[program]
name[process].version assign[=] name[version]
name[process].cvs_repository assign[=] name[cvs_repository]
if <ast.BoolOp object at 0x7da18f722080> begin[:]
<ast.Try object at 0x7da18f7228c0>
name[process].comment assign[=] name[comment]
name[process].is_online assign[=] call[name[int], parameter[name[is_online]]]
name[process].node assign[=] call[name[socket].gethostname, parameter[]]
<ast.Try object at 0x7da18f720220>
name[process].unix_procid assign[=] call[name[os].getpid, parameter[]]
name[process].start_time assign[=] call[name[_UTCToGPS], parameter[call[name[time].gmtime, parameter[]]]]
name[process].end_time assign[=] constant[None]
name[process].jobid assign[=] name[jobid]
name[process].domain assign[=] name[domain]
name[process].instruments assign[=] name[ifos]
name[process].process_id assign[=] call[name[proctable].get_next_id, parameter[]]
call[name[proctable].append, parameter[name[process]]]
return[name[process]] | keyword[def] identifier[append_process] ( identifier[xmldoc] , identifier[program] = keyword[None] , identifier[version] = keyword[None] , identifier[cvs_repository] = keyword[None] , identifier[cvs_entry_time] = keyword[None] , identifier[comment] = keyword[None] , identifier[is_online] = keyword[False] , identifier[jobid] = literal[int] , identifier[domain] = keyword[None] , identifier[ifos] = keyword[None] ):
literal[string]
keyword[try] :
identifier[proctable] = identifier[lsctables] . identifier[ProcessTable] . identifier[get_table] ( identifier[xmldoc] )
keyword[except] identifier[ValueError] :
identifier[proctable] = identifier[lsctables] . identifier[New] ( identifier[lsctables] . identifier[ProcessTable] )
identifier[xmldoc] . identifier[childNodes] [ literal[int] ]. identifier[appendChild] ( identifier[proctable] )
identifier[proctable] . identifier[sync_next_id] ()
identifier[process] = identifier[proctable] . identifier[RowType] ()
identifier[process] . identifier[program] = identifier[program]
identifier[process] . identifier[version] = identifier[version]
identifier[process] . identifier[cvs_repository] = identifier[cvs_repository]
keyword[if] identifier[cvs_entry_time] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cvs_entry_time] != literal[string] :
keyword[try] :
identifier[process] . identifier[cvs_entry_time] = identifier[_UTCToGPS] ( identifier[time] . identifier[strptime] ( identifier[cvs_entry_time] , literal[string] ))
keyword[except] identifier[ValueError] :
identifier[process] . identifier[cvs_entry_time] = identifier[_UTCToGPS] ( identifier[time] . identifier[strptime] ( identifier[cvs_entry_time] , literal[string] ))
keyword[else] :
identifier[process] . identifier[cvs_entry_time] = keyword[None]
identifier[process] . identifier[comment] = identifier[comment]
identifier[process] . identifier[is_online] = identifier[int] ( identifier[is_online] )
identifier[process] . identifier[node] = identifier[socket] . identifier[gethostname] ()
keyword[try] :
identifier[process] . identifier[username] = identifier[get_username] ()
keyword[except] identifier[KeyError] :
identifier[process] . identifier[username] = keyword[None]
identifier[process] . identifier[unix_procid] = identifier[os] . identifier[getpid] ()
identifier[process] . identifier[start_time] = identifier[_UTCToGPS] ( identifier[time] . identifier[gmtime] ())
identifier[process] . identifier[end_time] = keyword[None]
identifier[process] . identifier[jobid] = identifier[jobid]
identifier[process] . identifier[domain] = identifier[domain]
identifier[process] . identifier[instruments] = identifier[ifos]
identifier[process] . identifier[process_id] = identifier[proctable] . identifier[get_next_id] ()
identifier[proctable] . identifier[append] ( identifier[process] )
keyword[return] identifier[process] | def append_process(xmldoc, program=None, version=None, cvs_repository=None, cvs_entry_time=None, comment=None, is_online=False, jobid=0, domain=None, ifos=None):
"""
Add an entry to the process table in xmldoc. program, version,
cvs_repository, comment, and domain should all be strings or
unicodes. cvs_entry_time should be a string or unicode in the
format "YYYY/MM/DD HH:MM:SS". is_online should be a boolean, jobid
an integer. ifos should be an iterable (set, tuple, etc.) of
instrument names.
See also register_to_xmldoc().
"""
try:
proctable = lsctables.ProcessTable.get_table(xmldoc) # depends on [control=['try'], data=[]]
except ValueError:
proctable = lsctables.New(lsctables.ProcessTable)
xmldoc.childNodes[0].appendChild(proctable) # depends on [control=['except'], data=[]]
proctable.sync_next_id()
process = proctable.RowType()
process.program = program
process.version = version
process.cvs_repository = cvs_repository # FIXME: remove the "" case when the git versioning business is
# sorted out
if cvs_entry_time is not None and cvs_entry_time != '':
try: # try the git_version format first
process.cvs_entry_time = _UTCToGPS(time.strptime(cvs_entry_time, '%Y-%m-%d %H:%M:%S +0000')) # depends on [control=['try'], data=[]]
except ValueError: # fall back to the old cvs format
process.cvs_entry_time = _UTCToGPS(time.strptime(cvs_entry_time, '%Y/%m/%d %H:%M:%S')) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
process.cvs_entry_time = None
process.comment = comment
process.is_online = int(is_online)
process.node = socket.gethostname()
try:
process.username = get_username() # depends on [control=['try'], data=[]]
except KeyError:
process.username = None # depends on [control=['except'], data=[]]
process.unix_procid = os.getpid()
process.start_time = _UTCToGPS(time.gmtime())
process.end_time = None
process.jobid = jobid
process.domain = domain
process.instruments = ifos
process.process_id = proctable.get_next_id()
proctable.append(process)
return process |
def pick(self, acr=None):
    """
    Given the authentication context, find zero or more authn methods
    that could be used.

    :param acr: The authentication class reference requested
    :return: An URL
    """
    if acr is not None:
        return self._pick_by_class_ref(acr)
    # No class reference requested: every registered method is a candidate.
    return self.db.values()
constant[
Given the authentication context find zero or more authn methods
that could be used.
:param acr: The authentication class reference requested
:return: An URL
]
if compare[name[acr] is constant[None]] begin[:]
return[call[name[self].db.values, parameter[]]] | keyword[def] identifier[pick] ( identifier[self] , identifier[acr] = keyword[None] ):
literal[string]
keyword[if] identifier[acr] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[db] . identifier[values] ()
keyword[else] :
keyword[return] identifier[self] . identifier[_pick_by_class_ref] ( identifier[acr] ) | def pick(self, acr=None):
"""
Given the authentication context find zero or more authn methods
that could be used.
:param acr: The authentication class reference requested
:return: An URL
"""
if acr is None:
# Anything else doesn't make sense
return self.db.values() # depends on [control=['if'], data=[]]
else:
return self._pick_by_class_ref(acr) |
def make_hash_kw(self, tok):
    '''Get a Murmur hash and a normalized token.

    `tok` may be a :class:`unicode` string or a UTF-8-encoded byte
    string; unicode input is normalized to UTF-8 bytes.
    :data:`DOCUMENT_HASH_KEY` (hash value 0) is reserved for the
    document count, so a token that happens to hash to it is remapped
    to :data:`DOCUMENT_HASH_KEY_REPLACEMENT`.

    :param tok: token to hash
    :return: pair of normalized `tok` and its hash
    '''
    if isinstance(tok, unicode):
        tok = tok.encode('utf-8')
    digest = mmh3.hash(tok)
    return tok, DOCUMENT_HASH_KEY_REPLACEMENT if digest == DOCUMENT_HASH_KEY else digest
constant[Get a Murmur hash and a normalized token.
`tok` may be a :class:`unicode` string or a UTF-8-encoded
byte string. :data:`DOCUMENT_HASH_KEY`, hash value 0, is
reserved for the document count, and this function remaps
that value.
:param tok: token to hash
:return: pair of normalized `tok` and its hash
]
if call[name[isinstance], parameter[name[tok], name[unicode]]] begin[:]
variable[tok] assign[=] call[name[tok].encode, parameter[constant[utf-8]]]
variable[h] assign[=] call[name[mmh3].hash, parameter[name[tok]]]
if compare[name[h] equal[==] name[DOCUMENT_HASH_KEY]] begin[:]
variable[h] assign[=] name[DOCUMENT_HASH_KEY_REPLACEMENT]
return[tuple[[<ast.Name object at 0x7da1b021d9c0>, <ast.Name object at 0x7da1b021e5f0>]]] | keyword[def] identifier[make_hash_kw] ( identifier[self] , identifier[tok] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[tok] , identifier[unicode] ):
identifier[tok] = identifier[tok] . identifier[encode] ( literal[string] )
identifier[h] = identifier[mmh3] . identifier[hash] ( identifier[tok] )
keyword[if] identifier[h] == identifier[DOCUMENT_HASH_KEY] :
identifier[h] = identifier[DOCUMENT_HASH_KEY_REPLACEMENT]
keyword[return] ( identifier[tok] , identifier[h] ) | def make_hash_kw(self, tok):
"""Get a Murmur hash and a normalized token.
`tok` may be a :class:`unicode` string or a UTF-8-encoded
byte string. :data:`DOCUMENT_HASH_KEY`, hash value 0, is
reserved for the document count, and this function remaps
that value.
:param tok: token to hash
:return: pair of normalized `tok` and its hash
"""
if isinstance(tok, unicode):
tok = tok.encode('utf-8') # depends on [control=['if'], data=[]]
h = mmh3.hash(tok)
if h == DOCUMENT_HASH_KEY:
h = DOCUMENT_HASH_KEY_REPLACEMENT # depends on [control=['if'], data=['h']]
return (tok, h) |
def release():
    """Bump version, tag, build, gen docs."""
    # Refuse to release from a dirty working tree.
    if check_staged():
        raise EnvironmentError('There are staged changes, abort.')
    if check_unstaged():
        raise EnvironmentError('There are unstaged changes, abort.')
    # Run the release pipeline in order.
    for step in (bump, tag, build, doc_gen):
        step()
    puts(colored.yellow("Remember to upload documentation and package:"))
    with indent(2):
        for cmd in ("shovel doc.upload", "shovel version.upload"):
            puts(colored.cyan(cmd))
constant[Bump version, tag, build, gen docs.]
if call[name[check_staged], parameter[]] begin[:]
<ast.Raise object at 0x7da18eb579a0>
if call[name[check_unstaged], parameter[]] begin[:]
<ast.Raise object at 0x7da18eb54160>
call[name[bump], parameter[]]
call[name[tag], parameter[]]
call[name[build], parameter[]]
call[name[doc_gen], parameter[]]
call[name[puts], parameter[call[name[colored].yellow, parameter[constant[Remember to upload documentation and package:]]]]]
with call[name[indent], parameter[constant[2]]] begin[:]
call[name[puts], parameter[call[name[colored].cyan, parameter[constant[shovel doc.upload]]]]]
call[name[puts], parameter[call[name[colored].cyan, parameter[constant[shovel version.upload]]]]] | keyword[def] identifier[release] ():
literal[string]
keyword[if] identifier[check_staged] ():
keyword[raise] identifier[EnvironmentError] ( literal[string] )
keyword[if] identifier[check_unstaged] ():
keyword[raise] identifier[EnvironmentError] ( literal[string] )
identifier[bump] ()
identifier[tag] ()
identifier[build] ()
identifier[doc_gen] ()
identifier[puts] ( identifier[colored] . identifier[yellow] ( literal[string] ))
keyword[with] identifier[indent] ( literal[int] ):
identifier[puts] ( identifier[colored] . identifier[cyan] ( literal[string] ))
identifier[puts] ( identifier[colored] . identifier[cyan] ( literal[string] )) | def release():
"""Bump version, tag, build, gen docs."""
if check_staged():
raise EnvironmentError('There are staged changes, abort.') # depends on [control=['if'], data=[]]
if check_unstaged():
raise EnvironmentError('There are unstaged changes, abort.') # depends on [control=['if'], data=[]]
bump()
tag()
build()
doc_gen()
puts(colored.yellow('Remember to upload documentation and package:'))
with indent(2):
puts(colored.cyan('shovel doc.upload'))
puts(colored.cyan('shovel version.upload')) # depends on [control=['with'], data=[]] |
def p_retry_option(p):
    """
    retry_option : LIMIT COLON NUMBER
                 | DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
    """
    # NOTE: the docstring above is the grammar specification consumed by the
    # PLY/yacc parser generator at runtime -- do not edit it for style.
    if len(p) == 4:
        # First production: LIMIT COLON NUMBER (len(p) counts the LHS too),
        # so p[3] is the numeric retry limit.
        p[0] = {"limit": int(p[3]) }
    elif len(p) == 7:
        # Second production: DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER
        # CLOSE_BRACKET -- p[5] is the delay amount, p[3] the unit
        # identifier.  `Delay` is presumably a project value type; verify
        # its (amount, unit) argument order against its definition.
        p[0] = {"delay": Delay(int(p[5]), p[3])}
    else:
        # Unreachable for the declared grammar; guards against rule edits.
        raise RuntimeError("Invalid production in 'retry_option'")
constant[
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
]
if compare[call[name[len], parameter[name[p]]] equal[==] constant[4]] begin[:]
call[name[p]][constant[0]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a237f0>], [<ast.Call object at 0x7da1b0a219f0>]] | keyword[def] identifier[p_retry_option] ( identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]={ literal[string] : identifier[int] ( identifier[p] [ literal[int] ])}
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]={ literal[string] : identifier[Delay] ( identifier[int] ( identifier[p] [ literal[int] ]), identifier[p] [ literal[int] ])}
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {'limit': int(p[3])} # depends on [control=['if'], data=[]]
elif len(p) == 7:
p[0] = {'delay': Delay(int(p[5]), p[3])} # depends on [control=['if'], data=[]]
else:
raise RuntimeError("Invalid production in 'retry_option'") |
def to_matrix(self, n_qubits=-1):
    """Convert to the matrix."""
    if n_qubits == -1:
        # Default: just enough qubits to cover the highest index used.
        n_qubits = self.max_n() + 1
    result = I.to_matrix(n_qubits)
    for gate in self.ops:
        # Identity factors contribute nothing to the product.
        if not gate.is_identity:
            result = result @ gate.to_matrix(n_qubits)
    return result * self.coeff
constant[Convert to the matrix.]
if compare[name[n_qubits] equal[==] <ast.UnaryOp object at 0x7da2049608b0>] begin[:]
variable[n_qubits] assign[=] binary_operation[call[name[self].max_n, parameter[]] + constant[1]]
variable[mat] assign[=] call[name[I].to_matrix, parameter[name[n_qubits]]]
for taget[name[op]] in starred[name[self].ops] begin[:]
if name[op].is_identity begin[:]
continue
variable[mat] assign[=] binary_operation[name[mat] <ast.MatMult object at 0x7da2590d6860> call[name[op].to_matrix, parameter[name[n_qubits]]]]
return[binary_operation[name[mat] * name[self].coeff]] | keyword[def] identifier[to_matrix] ( identifier[self] , identifier[n_qubits] =- literal[int] ):
literal[string]
keyword[if] identifier[n_qubits] ==- literal[int] :
identifier[n_qubits] = identifier[self] . identifier[max_n] ()+ literal[int]
identifier[mat] = identifier[I] . identifier[to_matrix] ( identifier[n_qubits] )
keyword[for] identifier[op] keyword[in] identifier[self] . identifier[ops] :
keyword[if] identifier[op] . identifier[is_identity] :
keyword[continue]
identifier[mat] = identifier[mat] @ identifier[op] . identifier[to_matrix] ( identifier[n_qubits] )
keyword[return] identifier[mat] * identifier[self] . identifier[coeff] | def to_matrix(self, n_qubits=-1):
"""Convert to the matrix."""
if n_qubits == -1:
n_qubits = self.max_n() + 1 # depends on [control=['if'], data=['n_qubits']]
mat = I.to_matrix(n_qubits)
for op in self.ops:
if op.is_identity:
continue # depends on [control=['if'], data=[]]
mat = mat @ op.to_matrix(n_qubits) # depends on [control=['for'], data=['op']]
return mat * self.coeff |
def _horizontalShift(self):
    """List should be placed in such a way that the typed text in the list
    is directly under the typed text in the editor.
    """
    # Empirically-required offset; reason unknown.  May not hold on other
    # systems/versions (see original author's note).
    magic_adjustment = 2
    typed = self.model().typedText()
    return self.fontMetrics().width(typed) + magic_adjustment
constant[List should be plased such way, that typed text in the list is under
typed text in the editor
]
variable[strangeAdjustment] assign[=] constant[2]
return[binary_operation[call[call[name[self].fontMetrics, parameter[]].width, parameter[call[call[name[self].model, parameter[]].typedText, parameter[]]]] + name[strangeAdjustment]]] | keyword[def] identifier[_horizontalShift] ( identifier[self] ):
literal[string]
identifier[strangeAdjustment] = literal[int]
keyword[return] identifier[self] . identifier[fontMetrics] (). identifier[width] ( identifier[self] . identifier[model] (). identifier[typedText] ())+ identifier[strangeAdjustment] | def _horizontalShift(self):
"""List should be plased such way, that typed text in the list is under
typed text in the editor
"""
strangeAdjustment = 2 # I don't know why. Probably, won't work on other systems and versions
return self.fontMetrics().width(self.model().typedText()) + strangeAdjustment |
def _norm_default(x):
    """Default Euclidean norm implementation."""
    # Imported lazily to keep `import odl` fast.
    import scipy.linalg

    flat = x.data.ravel()
    if _blas_is_applicable(x.data):
        # Use the dtype-matched BLAS nrm2 routine when the data qualifies.
        nrm2 = scipy.linalg.blas.get_blas_funcs('nrm2', dtype=x.dtype)
        return partial(nrm2, n=native(x.size))(flat)
    return np.linalg.norm(flat)
constant[Default Euclidean norm implementation.]
import module[scipy.linalg]
if call[name[_blas_is_applicable], parameter[name[x].data]] begin[:]
variable[nrm2] assign[=] call[name[scipy].linalg.blas.get_blas_funcs, parameter[constant[nrm2]]]
variable[norm] assign[=] call[name[partial], parameter[name[nrm2]]]
return[call[name[norm], parameter[call[name[x].data.ravel, parameter[]]]]] | keyword[def] identifier[_norm_default] ( identifier[x] ):
literal[string]
keyword[import] identifier[scipy] . identifier[linalg]
keyword[if] identifier[_blas_is_applicable] ( identifier[x] . identifier[data] ):
identifier[nrm2] = identifier[scipy] . identifier[linalg] . identifier[blas] . identifier[get_blas_funcs] ( literal[string] , identifier[dtype] = identifier[x] . identifier[dtype] )
identifier[norm] = identifier[partial] ( identifier[nrm2] , identifier[n] = identifier[native] ( identifier[x] . identifier[size] ))
keyword[else] :
identifier[norm] = identifier[np] . identifier[linalg] . identifier[norm]
keyword[return] identifier[norm] ( identifier[x] . identifier[data] . identifier[ravel] ()) | def _norm_default(x):
"""Default Euclidean norm implementation."""
# Lazy import to improve `import odl` time
import scipy.linalg
if _blas_is_applicable(x.data):
nrm2 = scipy.linalg.blas.get_blas_funcs('nrm2', dtype=x.dtype)
norm = partial(nrm2, n=native(x.size)) # depends on [control=['if'], data=[]]
else:
norm = np.linalg.norm
return norm(x.data.ravel()) |
def solve_semi_dual(a, b, C, regul, method="L-BFGS-B", tol=1e-3, max_iter=500,
                    verbose=False):
    """
    Solve the "smoothed" semi-dual objective.

    Parameters
    ----------
    a: array, shape = len(a)
    b: array, shape = len(b)
        Input histograms (should be non-negative and sum to 1).
    C: array, shape = len(a) x len(b)
        Ground cost matrix.
    regul: Regularization object
        Should implement a max_Omega(X) method.
    method: str
        Solver to be used (passed to `scipy.optimize.minimize`).
    tol: float
        Tolerance parameter.
    max_iter: int
        Maximum number of iterations.

    Returns
    -------
    alpha: array, shape = len(a)
        Semi-dual potentials.
    """
    def _negated(alpha):
        # scipy.optimize.minimize minimizes, so negate both the objective
        # and its gradient to maximize the semi-dual.
        value, gradient = semi_dual_obj_grad(alpha, a, b, C, regul)
        return -value, -gradient

    res = minimize(_negated, np.zeros(len(a)), method=method, jac=True,
                   tol=tol, options=dict(maxiter=max_iter, disp=verbose))
    return res.x, res
constant[
Solve the "smoothed" semi-dual objective.
Parameters
----------
a: array, shape = len(a)
b: array, shape = len(b)
Input histograms (should be non-negative and sum to 1).
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a max_Omega(X) method.
method: str
Solver to be used (passed to `scipy.optimize.minimize`).
tol: float
Tolerance parameter.
max_iter: int
Maximum number of iterations.
Returns
-------
alpha: array, shape = len(a)
Semi-dual potentials.
]
def function[_func, parameter[alpha]]:
<ast.Tuple object at 0x7da1b163f7f0> assign[=] call[name[semi_dual_obj_grad], parameter[name[alpha], name[a], name[b], name[C], name[regul]]]
return[tuple[[<ast.UnaryOp object at 0x7da1b163fd00>, <ast.UnaryOp object at 0x7da1b163fcd0>]]]
variable[alpha_init] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[a]]]]]
variable[res] assign[=] call[name[minimize], parameter[name[_func], name[alpha_init]]]
return[tuple[[<ast.Attribute object at 0x7da1b163b7c0>, <ast.Name object at 0x7da1b163b2e0>]]] | keyword[def] identifier[solve_semi_dual] ( identifier[a] , identifier[b] , identifier[C] , identifier[regul] , identifier[method] = literal[string] , identifier[tol] = literal[int] , identifier[max_iter] = literal[int] ,
identifier[verbose] = keyword[False] ):
literal[string]
keyword[def] identifier[_func] ( identifier[alpha] ):
identifier[obj] , identifier[grad] = identifier[semi_dual_obj_grad] ( identifier[alpha] , identifier[a] , identifier[b] , identifier[C] , identifier[regul] )
keyword[return] - identifier[obj] ,- identifier[grad]
identifier[alpha_init] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[a] ))
identifier[res] = identifier[minimize] ( identifier[_func] , identifier[alpha_init] , identifier[method] = identifier[method] , identifier[jac] = keyword[True] ,
identifier[tol] = identifier[tol] , identifier[options] = identifier[dict] ( identifier[maxiter] = identifier[max_iter] , identifier[disp] = identifier[verbose] ))
keyword[return] identifier[res] . identifier[x] , identifier[res] | def solve_semi_dual(a, b, C, regul, method='L-BFGS-B', tol=0.001, max_iter=500, verbose=False):
"""
Solve the "smoothed" semi-dual objective.
Parameters
----------
a: array, shape = len(a)
b: array, shape = len(b)
Input histograms (should be non-negative and sum to 1).
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a max_Omega(X) method.
method: str
Solver to be used (passed to `scipy.optimize.minimize`).
tol: float
Tolerance parameter.
max_iter: int
Maximum number of iterations.
Returns
-------
alpha: array, shape = len(a)
Semi-dual potentials.
"""
def _func(alpha):
(obj, grad) = semi_dual_obj_grad(alpha, a, b, C, regul)
# We need to maximize the semi-dual.
return (-obj, -grad)
alpha_init = np.zeros(len(a))
res = minimize(_func, alpha_init, method=method, jac=True, tol=tol, options=dict(maxiter=max_iter, disp=verbose))
return (res.x, res) |
def retire(self, did):
    """
    Retire this did of Aquarius

    :param did: DID, str
    :return: bool -- True on success, False when Aquarius reports an error
    """
    try:
        ddo = self.resolve(did)
        metadata = ddo.find_service_by_type(ServiceTypes.METADATA)
        aquarius = self._get_aquarius(metadata.endpoints.service)
        aquarius.retire_asset_ddo(did)
    except AquariusGenericError as err:
        # Best-effort: log and report failure instead of propagating.
        logger.error(err)
        return False
    return True
constant[
Retire this did of Aquarius
:param did: DID, str
:return: bool
]
<ast.Try object at 0x7da204344b20> | keyword[def] identifier[retire] ( identifier[self] , identifier[did] ):
literal[string]
keyword[try] :
identifier[ddo] = identifier[self] . identifier[resolve] ( identifier[did] )
identifier[metadata_service] = identifier[ddo] . identifier[find_service_by_type] ( identifier[ServiceTypes] . identifier[METADATA] )
identifier[self] . identifier[_get_aquarius] ( identifier[metadata_service] . identifier[endpoints] . identifier[service] ). identifier[retire_asset_ddo] ( identifier[did] )
keyword[return] keyword[True]
keyword[except] identifier[AquariusGenericError] keyword[as] identifier[err] :
identifier[logger] . identifier[error] ( identifier[err] )
keyword[return] keyword[False] | def retire(self, did):
"""
Retire this did of Aquarius
:param did: DID, str
:return: bool
"""
try:
ddo = self.resolve(did)
metadata_service = ddo.find_service_by_type(ServiceTypes.METADATA)
self._get_aquarius(metadata_service.endpoints.service).retire_asset_ddo(did)
return True # depends on [control=['try'], data=[]]
except AquariusGenericError as err:
logger.error(err)
return False # depends on [control=['except'], data=['err']] |
def market_exact(self, session, start_time: str, end_time: str) -> Session:
    """
    Explicitly specify start time and end time

    Args:
        session: predefined session
        start_time: start time in terms of HHMM string (empty keeps the
            session's own opening time)
        end_time: end time in terms of HHMM string (empty keeps the
            session's own closing time)

    Returns:
        Session of start_time and end_time, or SessNA when the session is
        unknown or the requested window is empty
    """
    if session not in self.exch:
        return SessNA
    bounds = self.exch[session]
    # A session whose last tick precedes its first spans midnight; in that
    # case the requested times are not clamped to the session bounds.
    same_day = bounds[0] < bounds[-1]

    if start_time:
        open_t = param.to_hour(start_time)
        if same_day:
            open_t = max(open_t, bounds[0])
    else:
        open_t = bounds[0]

    if end_time:
        close_t = param.to_hour(end_time)
        if same_day:
            close_t = min(close_t, bounds[-1])
    else:
        close_t = bounds[-1]

    if same_day and open_t > close_t:
        return SessNA
    return Session(start_time=open_t, end_time=close_t)
constant[
Explicitly specify start time and end time
Args:
session: predefined session
start_time: start time in terms of HHMM string
end_time: end time in terms of HHMM string
Returns:
Session of start_time and end_time
]
if compare[name[session] <ast.NotIn object at 0x7da2590d7190> name[self].exch] begin[:]
return[name[SessNA]]
variable[ss] assign[=] call[name[self].exch][name[session]]
variable[same_day] assign[=] compare[call[name[ss]][constant[0]] less[<] call[name[ss]][<ast.UnaryOp object at 0x7da1b012f010>]]
if <ast.UnaryOp object at 0x7da1b012f400> begin[:]
variable[s_time] assign[=] call[name[ss]][constant[0]]
if <ast.UnaryOp object at 0x7da1b012d660> begin[:]
variable[e_time] assign[=] call[name[ss]][<ast.UnaryOp object at 0x7da1b012dcc0>]
if <ast.BoolOp object at 0x7da18f09e020> begin[:]
return[name[SessNA]]
return[call[name[Session], parameter[]]] | keyword[def] identifier[market_exact] ( identifier[self] , identifier[session] , identifier[start_time] : identifier[str] , identifier[end_time] : identifier[str] )-> identifier[Session] :
literal[string]
keyword[if] identifier[session] keyword[not] keyword[in] identifier[self] . identifier[exch] : keyword[return] identifier[SessNA]
identifier[ss] = identifier[self] . identifier[exch] [ identifier[session] ]
identifier[same_day] = identifier[ss] [ literal[int] ]< identifier[ss] [- literal[int] ]
keyword[if] keyword[not] identifier[start_time] : identifier[s_time] = identifier[ss] [ literal[int] ]
keyword[else] :
identifier[s_time] = identifier[param] . identifier[to_hour] ( identifier[start_time] )
keyword[if] identifier[same_day] : identifier[s_time] = identifier[max] ( identifier[s_time] , identifier[ss] [ literal[int] ])
keyword[if] keyword[not] identifier[end_time] : identifier[e_time] = identifier[ss] [- literal[int] ]
keyword[else] :
identifier[e_time] = identifier[param] . identifier[to_hour] ( identifier[end_time] )
keyword[if] identifier[same_day] : identifier[e_time] = identifier[min] ( identifier[e_time] , identifier[ss] [- literal[int] ])
keyword[if] identifier[same_day] keyword[and] ( identifier[s_time] > identifier[e_time] ): keyword[return] identifier[SessNA]
keyword[return] identifier[Session] ( identifier[start_time] = identifier[s_time] , identifier[end_time] = identifier[e_time] ) | def market_exact(self, session, start_time: str, end_time: str) -> Session:
"""
Explicitly specify start time and end time
Args:
session: predefined session
start_time: start time in terms of HHMM string
end_time: end time in terms of HHMM string
Returns:
Session of start_time and end_time
"""
if session not in self.exch:
return SessNA # depends on [control=['if'], data=[]]
ss = self.exch[session]
same_day = ss[0] < ss[-1]
if not start_time:
s_time = ss[0] # depends on [control=['if'], data=[]]
else:
s_time = param.to_hour(start_time)
if same_day:
s_time = max(s_time, ss[0]) # depends on [control=['if'], data=[]]
if not end_time:
e_time = ss[-1] # depends on [control=['if'], data=[]]
else:
e_time = param.to_hour(end_time)
if same_day:
e_time = min(e_time, ss[-1]) # depends on [control=['if'], data=[]]
if same_day and s_time > e_time:
return SessNA # depends on [control=['if'], data=[]]
return Session(start_time=s_time, end_time=e_time) |
def can_cloak_as(user, other_user):
"""
Returns true if `user` can cloak as `other_user`
"""
# check to see if the user is allowed to do this
can_cloak = False
try:
can_cloak = user.can_cloak_as(other_user)
except AttributeError as e:
try:
can_cloak = user.is_staff
except AttributeError as e:
pass
return can_cloak | def function[can_cloak_as, parameter[user, other_user]]:
constant[
Returns true if `user` can cloak as `other_user`
]
variable[can_cloak] assign[=] constant[False]
<ast.Try object at 0x7da204963550>
return[name[can_cloak]] | keyword[def] identifier[can_cloak_as] ( identifier[user] , identifier[other_user] ):
literal[string]
identifier[can_cloak] = keyword[False]
keyword[try] :
identifier[can_cloak] = identifier[user] . identifier[can_cloak_as] ( identifier[other_user] )
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[try] :
identifier[can_cloak] = identifier[user] . identifier[is_staff]
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[pass]
keyword[return] identifier[can_cloak] | def can_cloak_as(user, other_user):
"""
Returns true if `user` can cloak as `other_user`
"""
# check to see if the user is allowed to do this
can_cloak = False
try:
can_cloak = user.can_cloak_as(other_user) # depends on [control=['try'], data=[]]
except AttributeError as e:
try:
can_cloak = user.is_staff # depends on [control=['try'], data=[]]
except AttributeError as e:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
return can_cloak |
def BROADCAST_FILTER_NOT(func):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: not func(u, command, *args, **kwargs) | def function[BROADCAST_FILTER_NOT, parameter[func]]:
constant[
Composes the passed filters into an and-joined filter.
]
return[<ast.Lambda object at 0x7da1b2345660>] | keyword[def] identifier[BROADCAST_FILTER_NOT] ( identifier[func] ):
literal[string]
keyword[return] keyword[lambda] identifier[u] , identifier[command] ,* identifier[args] ,** identifier[kwargs] : keyword[not] identifier[func] ( identifier[u] , identifier[command] ,* identifier[args] ,** identifier[kwargs] ) | def BROADCAST_FILTER_NOT(func):
"""
Composes the passed filters into an and-joined filter.
"""
return lambda u, command, *args, **kwargs: not func(u, command, *args, **kwargs) |
def reaction_formula(reaction, compound_formula):
"""Calculate formula compositions for both sides of the specified reaction.
If the compounds in the reaction all have formula, then calculate and
return the chemical compositions for both sides, otherwise return `None`.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_formula: a map from compound id to formula.
"""
def multiply_formula(compound_list):
for compound, count in compound_list:
yield count * compound_formula[compound.name]
for compound, _ in reaction.compounds:
if compound.name not in compound_formula:
return None
else:
left_form = reduce(
operator.or_, multiply_formula(reaction.left), Formula())
right_form = reduce(
operator.or_, multiply_formula(reaction.right), Formula())
return left_form, right_form | def function[reaction_formula, parameter[reaction, compound_formula]]:
constant[Calculate formula compositions for both sides of the specified reaction.
If the compounds in the reaction all have formula, then calculate and
return the chemical compositions for both sides, otherwise return `None`.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_formula: a map from compound id to formula.
]
def function[multiply_formula, parameter[compound_list]]:
for taget[tuple[[<ast.Name object at 0x7da18f8136a0>, <ast.Name object at 0x7da18f8126b0>]]] in starred[name[compound_list]] begin[:]
<ast.Yield object at 0x7da18f810850>
for taget[tuple[[<ast.Name object at 0x7da18f811f60>, <ast.Name object at 0x7da18f811f30>]]] in starred[name[reaction].compounds] begin[:]
if compare[name[compound].name <ast.NotIn object at 0x7da2590d7190> name[compound_formula]] begin[:]
return[constant[None]]
return[tuple[[<ast.Name object at 0x7da20e9b2680>, <ast.Name object at 0x7da20e9b1570>]]] | keyword[def] identifier[reaction_formula] ( identifier[reaction] , identifier[compound_formula] ):
literal[string]
keyword[def] identifier[multiply_formula] ( identifier[compound_list] ):
keyword[for] identifier[compound] , identifier[count] keyword[in] identifier[compound_list] :
keyword[yield] identifier[count] * identifier[compound_formula] [ identifier[compound] . identifier[name] ]
keyword[for] identifier[compound] , identifier[_] keyword[in] identifier[reaction] . identifier[compounds] :
keyword[if] identifier[compound] . identifier[name] keyword[not] keyword[in] identifier[compound_formula] :
keyword[return] keyword[None]
keyword[else] :
identifier[left_form] = identifier[reduce] (
identifier[operator] . identifier[or_] , identifier[multiply_formula] ( identifier[reaction] . identifier[left] ), identifier[Formula] ())
identifier[right_form] = identifier[reduce] (
identifier[operator] . identifier[or_] , identifier[multiply_formula] ( identifier[reaction] . identifier[right] ), identifier[Formula] ())
keyword[return] identifier[left_form] , identifier[right_form] | def reaction_formula(reaction, compound_formula):
"""Calculate formula compositions for both sides of the specified reaction.
If the compounds in the reaction all have formula, then calculate and
return the chemical compositions for both sides, otherwise return `None`.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_formula: a map from compound id to formula.
"""
def multiply_formula(compound_list):
for (compound, count) in compound_list:
yield (count * compound_formula[compound.name]) # depends on [control=['for'], data=[]]
for (compound, _) in reaction.compounds:
if compound.name not in compound_formula:
return None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
left_form = reduce(operator.or_, multiply_formula(reaction.left), Formula())
right_form = reduce(operator.or_, multiply_formula(reaction.right), Formula())
return (left_form, right_form) |
def generate_results_subparser(subparsers):
"""Adds a sub-command parser to `subparsers` to manipulate CSV
results data."""
parser = subparsers.add_parser(
'results', description=constants.RESULTS_DESCRIPTION,
epilog=constants.RESULTS_EPILOG, formatter_class=ParagraphFormatter,
help=constants.RESULTS_HELP)
utils.add_common_arguments(parser)
parser.set_defaults(func=results)
be_group = parser.add_argument_group('bifurcated extend')
be_group.add_argument('-b', '--bifurcated-extend',
dest='bifurcated_extend', metavar='CORPUS',
help=constants.RESULTS_BIFURCATED_EXTEND_HELP)
be_group.add_argument('--max-be-count', dest='bifurcated_extend_size',
help=constants.RESULTS_BIFURCATED_EXTEND_MAX_HELP,
metavar='COUNT', type=int)
parser.add_argument('-e', '--extend', dest='extend',
help=constants.RESULTS_EXTEND_HELP, metavar='CORPUS')
parser.add_argument('--excise', help=constants.RESULTS_EXCISE_HELP,
metavar='NGRAM', type=str)
parser.add_argument('-l', '--label', dest='label',
help=constants.RESULTS_LABEL_HELP, metavar='LABEL')
parser.add_argument('--min-count', dest='min_count',
help=constants.RESULTS_MINIMUM_COUNT_HELP,
metavar='COUNT', type=int)
parser.add_argument('--max-count', dest='max_count',
help=constants.RESULTS_MAXIMUM_COUNT_HELP,
metavar='COUNT', type=int)
parser.add_argument('--min-count-work', dest='min_count_work',
help=constants.RESULTS_MINIMUM_COUNT_WORK_HELP,
metavar='COUNT', type=int)
parser.add_argument('--max-count-work', dest='max_count_work',
help=constants.RESULTS_MAXIMUM_COUNT_WORK_HELP,
metavar='COUNT', type=int)
parser.add_argument('--min-size', dest='min_size',
help=constants.RESULTS_MINIMUM_SIZE_HELP,
metavar='SIZE', type=int)
parser.add_argument('--max-size', dest='max_size',
help=constants.RESULTS_MAXIMUM_SIZE_HELP,
metavar='SIZE', type=int)
parser.add_argument('--min-works', dest='min_works',
help=constants.RESULTS_MINIMUM_WORK_HELP,
metavar='COUNT', type=int)
parser.add_argument('--max-works', dest='max_works',
help=constants.RESULTS_MAXIMUM_WORK_HELP,
metavar='COUNT', type=int)
parser.add_argument('--ngrams', dest='ngrams',
help=constants.RESULTS_NGRAMS_HELP, metavar='NGRAMS')
parser.add_argument('--reciprocal', action='store_true',
help=constants.RESULTS_RECIPROCAL_HELP)
parser.add_argument('--reduce', action='store_true',
help=constants.RESULTS_REDUCE_HELP)
parser.add_argument('--relabel', help=constants.RESULTS_RELABEL_HELP,
metavar='CATALOGUE')
parser.add_argument('--remove', help=constants.RESULTS_REMOVE_HELP,
metavar='LABEL', type=str)
parser.add_argument('--sort', action='store_true',
help=constants.RESULTS_SORT_HELP)
utils.add_tokenizer_argument(parser)
parser.add_argument('-z', '--zero-fill', dest='zero_fill',
help=constants.RESULTS_ZERO_FILL_HELP,
metavar='CORPUS')
parser.add_argument('results', help=constants.RESULTS_RESULTS_HELP,
metavar='RESULTS')
unsafe_group = parser.add_argument_group(
constants.RESULTS_UNSAFE_GROUP_TITLE,
constants.RESULTS_UNSAFE_GROUP_DESCRIPTION)
unsafe_group.add_argument('--add-label-count', action='store_true',
help=constants.RESULTS_ADD_LABEL_COUNT_HELP)
unsafe_group.add_argument('--add-label-work-count', action='store_true',
help=constants.RESULTS_ADD_LABEL_WORK_COUNT_HELP)
unsafe_group.add_argument('--collapse-witnesses', action='store_true',
help=constants.RESULTS_COLLAPSE_WITNESSES_HELP)
unsafe_group.add_argument('--group-by-ngram', dest='group_by_ngram',
help=constants.RESULTS_GROUP_BY_NGRAM_HELP,
metavar='CATALOGUE')
unsafe_group.add_argument('--group-by-witness', action='store_true',
help=constants.RESULTS_GROUP_BY_WITNESS_HELP) | def function[generate_results_subparser, parameter[subparsers]]:
constant[Adds a sub-command parser to `subparsers` to manipulate CSV
results data.]
variable[parser] assign[=] call[name[subparsers].add_parser, parameter[constant[results]]]
call[name[utils].add_common_arguments, parameter[name[parser]]]
call[name[parser].set_defaults, parameter[]]
variable[be_group] assign[=] call[name[parser].add_argument_group, parameter[constant[bifurcated extend]]]
call[name[be_group].add_argument, parameter[constant[-b], constant[--bifurcated-extend]]]
call[name[be_group].add_argument, parameter[constant[--max-be-count]]]
call[name[parser].add_argument, parameter[constant[-e], constant[--extend]]]
call[name[parser].add_argument, parameter[constant[--excise]]]
call[name[parser].add_argument, parameter[constant[-l], constant[--label]]]
call[name[parser].add_argument, parameter[constant[--min-count]]]
call[name[parser].add_argument, parameter[constant[--max-count]]]
call[name[parser].add_argument, parameter[constant[--min-count-work]]]
call[name[parser].add_argument, parameter[constant[--max-count-work]]]
call[name[parser].add_argument, parameter[constant[--min-size]]]
call[name[parser].add_argument, parameter[constant[--max-size]]]
call[name[parser].add_argument, parameter[constant[--min-works]]]
call[name[parser].add_argument, parameter[constant[--max-works]]]
call[name[parser].add_argument, parameter[constant[--ngrams]]]
call[name[parser].add_argument, parameter[constant[--reciprocal]]]
call[name[parser].add_argument, parameter[constant[--reduce]]]
call[name[parser].add_argument, parameter[constant[--relabel]]]
call[name[parser].add_argument, parameter[constant[--remove]]]
call[name[parser].add_argument, parameter[constant[--sort]]]
call[name[utils].add_tokenizer_argument, parameter[name[parser]]]
call[name[parser].add_argument, parameter[constant[-z], constant[--zero-fill]]]
call[name[parser].add_argument, parameter[constant[results]]]
variable[unsafe_group] assign[=] call[name[parser].add_argument_group, parameter[name[constants].RESULTS_UNSAFE_GROUP_TITLE, name[constants].RESULTS_UNSAFE_GROUP_DESCRIPTION]]
call[name[unsafe_group].add_argument, parameter[constant[--add-label-count]]]
call[name[unsafe_group].add_argument, parameter[constant[--add-label-work-count]]]
call[name[unsafe_group].add_argument, parameter[constant[--collapse-witnesses]]]
call[name[unsafe_group].add_argument, parameter[constant[--group-by-ngram]]]
call[name[unsafe_group].add_argument, parameter[constant[--group-by-witness]]] | keyword[def] identifier[generate_results_subparser] ( identifier[subparsers] ):
literal[string]
identifier[parser] = identifier[subparsers] . identifier[add_parser] (
literal[string] , identifier[description] = identifier[constants] . identifier[RESULTS_DESCRIPTION] ,
identifier[epilog] = identifier[constants] . identifier[RESULTS_EPILOG] , identifier[formatter_class] = identifier[ParagraphFormatter] ,
identifier[help] = identifier[constants] . identifier[RESULTS_HELP] )
identifier[utils] . identifier[add_common_arguments] ( identifier[parser] )
identifier[parser] . identifier[set_defaults] ( identifier[func] = identifier[results] )
identifier[be_group] = identifier[parser] . identifier[add_argument_group] ( literal[string] )
identifier[be_group] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_BIFURCATED_EXTEND_HELP] )
identifier[be_group] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_BIFURCATED_EXTEND_MAX_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_EXTEND_HELP] , identifier[metavar] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = identifier[constants] . identifier[RESULTS_EXCISE_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[str] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_LABEL_HELP] , identifier[metavar] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_MINIMUM_COUNT_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_MAXIMUM_COUNT_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_MINIMUM_COUNT_WORK_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_MAXIMUM_COUNT_WORK_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_MINIMUM_SIZE_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_MAXIMUM_SIZE_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_MINIMUM_WORK_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_MAXIMUM_WORK_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[int] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_NGRAMS_HELP] , identifier[metavar] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_RECIPROCAL_HELP] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_REDUCE_HELP] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = identifier[constants] . identifier[RESULTS_RELABEL_HELP] ,
identifier[metavar] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = identifier[constants] . identifier[RESULTS_REMOVE_HELP] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[str] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_SORT_HELP] )
identifier[utils] . identifier[add_tokenizer_argument] ( identifier[parser] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_ZERO_FILL_HELP] ,
identifier[metavar] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = identifier[constants] . identifier[RESULTS_RESULTS_HELP] ,
identifier[metavar] = literal[string] )
identifier[unsafe_group] = identifier[parser] . identifier[add_argument_group] (
identifier[constants] . identifier[RESULTS_UNSAFE_GROUP_TITLE] ,
identifier[constants] . identifier[RESULTS_UNSAFE_GROUP_DESCRIPTION] )
identifier[unsafe_group] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_ADD_LABEL_COUNT_HELP] )
identifier[unsafe_group] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_ADD_LABEL_WORK_COUNT_HELP] )
identifier[unsafe_group] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_COLLAPSE_WITNESSES_HELP] )
identifier[unsafe_group] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_GROUP_BY_NGRAM_HELP] ,
identifier[metavar] = literal[string] )
identifier[unsafe_group] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = identifier[constants] . identifier[RESULTS_GROUP_BY_WITNESS_HELP] ) | def generate_results_subparser(subparsers):
"""Adds a sub-command parser to `subparsers` to manipulate CSV
results data."""
parser = subparsers.add_parser('results', description=constants.RESULTS_DESCRIPTION, epilog=constants.RESULTS_EPILOG, formatter_class=ParagraphFormatter, help=constants.RESULTS_HELP)
utils.add_common_arguments(parser)
parser.set_defaults(func=results)
be_group = parser.add_argument_group('bifurcated extend')
be_group.add_argument('-b', '--bifurcated-extend', dest='bifurcated_extend', metavar='CORPUS', help=constants.RESULTS_BIFURCATED_EXTEND_HELP)
be_group.add_argument('--max-be-count', dest='bifurcated_extend_size', help=constants.RESULTS_BIFURCATED_EXTEND_MAX_HELP, metavar='COUNT', type=int)
parser.add_argument('-e', '--extend', dest='extend', help=constants.RESULTS_EXTEND_HELP, metavar='CORPUS')
parser.add_argument('--excise', help=constants.RESULTS_EXCISE_HELP, metavar='NGRAM', type=str)
parser.add_argument('-l', '--label', dest='label', help=constants.RESULTS_LABEL_HELP, metavar='LABEL')
parser.add_argument('--min-count', dest='min_count', help=constants.RESULTS_MINIMUM_COUNT_HELP, metavar='COUNT', type=int)
parser.add_argument('--max-count', dest='max_count', help=constants.RESULTS_MAXIMUM_COUNT_HELP, metavar='COUNT', type=int)
parser.add_argument('--min-count-work', dest='min_count_work', help=constants.RESULTS_MINIMUM_COUNT_WORK_HELP, metavar='COUNT', type=int)
parser.add_argument('--max-count-work', dest='max_count_work', help=constants.RESULTS_MAXIMUM_COUNT_WORK_HELP, metavar='COUNT', type=int)
parser.add_argument('--min-size', dest='min_size', help=constants.RESULTS_MINIMUM_SIZE_HELP, metavar='SIZE', type=int)
parser.add_argument('--max-size', dest='max_size', help=constants.RESULTS_MAXIMUM_SIZE_HELP, metavar='SIZE', type=int)
parser.add_argument('--min-works', dest='min_works', help=constants.RESULTS_MINIMUM_WORK_HELP, metavar='COUNT', type=int)
parser.add_argument('--max-works', dest='max_works', help=constants.RESULTS_MAXIMUM_WORK_HELP, metavar='COUNT', type=int)
parser.add_argument('--ngrams', dest='ngrams', help=constants.RESULTS_NGRAMS_HELP, metavar='NGRAMS')
parser.add_argument('--reciprocal', action='store_true', help=constants.RESULTS_RECIPROCAL_HELP)
parser.add_argument('--reduce', action='store_true', help=constants.RESULTS_REDUCE_HELP)
parser.add_argument('--relabel', help=constants.RESULTS_RELABEL_HELP, metavar='CATALOGUE')
parser.add_argument('--remove', help=constants.RESULTS_REMOVE_HELP, metavar='LABEL', type=str)
parser.add_argument('--sort', action='store_true', help=constants.RESULTS_SORT_HELP)
utils.add_tokenizer_argument(parser)
parser.add_argument('-z', '--zero-fill', dest='zero_fill', help=constants.RESULTS_ZERO_FILL_HELP, metavar='CORPUS')
parser.add_argument('results', help=constants.RESULTS_RESULTS_HELP, metavar='RESULTS')
unsafe_group = parser.add_argument_group(constants.RESULTS_UNSAFE_GROUP_TITLE, constants.RESULTS_UNSAFE_GROUP_DESCRIPTION)
unsafe_group.add_argument('--add-label-count', action='store_true', help=constants.RESULTS_ADD_LABEL_COUNT_HELP)
unsafe_group.add_argument('--add-label-work-count', action='store_true', help=constants.RESULTS_ADD_LABEL_WORK_COUNT_HELP)
unsafe_group.add_argument('--collapse-witnesses', action='store_true', help=constants.RESULTS_COLLAPSE_WITNESSES_HELP)
unsafe_group.add_argument('--group-by-ngram', dest='group_by_ngram', help=constants.RESULTS_GROUP_BY_NGRAM_HELP, metavar='CATALOGUE')
unsafe_group.add_argument('--group-by-witness', action='store_true', help=constants.RESULTS_GROUP_BY_WITNESS_HELP) |
def run(sniffer_instance=None, wait_time=0.5, clear=True, args=(),
debug=False):
"""
Runs the auto tester loop. Internally, the runner instanciates the sniffer_cls and
scanner class.
``sniffer_instance`` The class to run. Usually this is set to but a subclass of scanner.
Defaults to Sniffer. Sniffer class documentation for more information.
``wait_time`` The time, in seconds, to wait between polls. This is dependent on
the underlying scanner implementation. OS-specific libraries may choose
to ignore this parameter. Defaults to 0.5 seconds.
``clear`` Boolean. Set to True to clear the terminal before running the sniffer,
(alias, the unit tests). Defaults to True.
``args`` The arguments to pass to the sniffer/test runner. Defaults to ().
``debug`` Boolean. Sets the scanner and sniffer in debug mode, printing more internal
information. Defaults to False (and should usually be False).
"""
if sniffer_instance is None:
sniffer_instance = ScentSniffer()
if debug:
scanner = Scanner(
sniffer_instance.watch_paths,
scent=sniffer_instance.scent, logger=sys.stdout)
else:
scanner = Scanner(
sniffer_instance.watch_paths, scent=sniffer_instance.scent)
#sniffer = sniffer_cls(tuple(args), clear, debug)
sniffer_instance.set_up(tuple(args), clear, debug)
sniffer_instance.observe_scanner(scanner)
scanner.loop(wait_time) | def function[run, parameter[sniffer_instance, wait_time, clear, args, debug]]:
constant[
Runs the auto tester loop. Internally, the runner instanciates the sniffer_cls and
scanner class.
``sniffer_instance`` The class to run. Usually this is set to but a subclass of scanner.
Defaults to Sniffer. Sniffer class documentation for more information.
``wait_time`` The time, in seconds, to wait between polls. This is dependent on
the underlying scanner implementation. OS-specific libraries may choose
to ignore this parameter. Defaults to 0.5 seconds.
``clear`` Boolean. Set to True to clear the terminal before running the sniffer,
(alias, the unit tests). Defaults to True.
``args`` The arguments to pass to the sniffer/test runner. Defaults to ().
``debug`` Boolean. Sets the scanner and sniffer in debug mode, printing more internal
information. Defaults to False (and should usually be False).
]
if compare[name[sniffer_instance] is constant[None]] begin[:]
variable[sniffer_instance] assign[=] call[name[ScentSniffer], parameter[]]
if name[debug] begin[:]
variable[scanner] assign[=] call[name[Scanner], parameter[name[sniffer_instance].watch_paths]]
call[name[sniffer_instance].set_up, parameter[call[name[tuple], parameter[name[args]]], name[clear], name[debug]]]
call[name[sniffer_instance].observe_scanner, parameter[name[scanner]]]
call[name[scanner].loop, parameter[name[wait_time]]] | keyword[def] identifier[run] ( identifier[sniffer_instance] = keyword[None] , identifier[wait_time] = literal[int] , identifier[clear] = keyword[True] , identifier[args] =(),
identifier[debug] = keyword[False] ):
literal[string]
keyword[if] identifier[sniffer_instance] keyword[is] keyword[None] :
identifier[sniffer_instance] = identifier[ScentSniffer] ()
keyword[if] identifier[debug] :
identifier[scanner] = identifier[Scanner] (
identifier[sniffer_instance] . identifier[watch_paths] ,
identifier[scent] = identifier[sniffer_instance] . identifier[scent] , identifier[logger] = identifier[sys] . identifier[stdout] )
keyword[else] :
identifier[scanner] = identifier[Scanner] (
identifier[sniffer_instance] . identifier[watch_paths] , identifier[scent] = identifier[sniffer_instance] . identifier[scent] )
identifier[sniffer_instance] . identifier[set_up] ( identifier[tuple] ( identifier[args] ), identifier[clear] , identifier[debug] )
identifier[sniffer_instance] . identifier[observe_scanner] ( identifier[scanner] )
identifier[scanner] . identifier[loop] ( identifier[wait_time] ) | def run(sniffer_instance=None, wait_time=0.5, clear=True, args=(), debug=False):
"""
Runs the auto tester loop. Internally, the runner instanciates the sniffer_cls and
scanner class.
``sniffer_instance`` The class to run. Usually this is set to but a subclass of scanner.
Defaults to Sniffer. Sniffer class documentation for more information.
``wait_time`` The time, in seconds, to wait between polls. This is dependent on
the underlying scanner implementation. OS-specific libraries may choose
to ignore this parameter. Defaults to 0.5 seconds.
``clear`` Boolean. Set to True to clear the terminal before running the sniffer,
(alias, the unit tests). Defaults to True.
``args`` The arguments to pass to the sniffer/test runner. Defaults to ().
``debug`` Boolean. Sets the scanner and sniffer in debug mode, printing more internal
information. Defaults to False (and should usually be False).
"""
if sniffer_instance is None:
sniffer_instance = ScentSniffer() # depends on [control=['if'], data=['sniffer_instance']]
if debug:
scanner = Scanner(sniffer_instance.watch_paths, scent=sniffer_instance.scent, logger=sys.stdout) # depends on [control=['if'], data=[]]
else:
scanner = Scanner(sniffer_instance.watch_paths, scent=sniffer_instance.scent)
#sniffer = sniffer_cls(tuple(args), clear, debug)
sniffer_instance.set_up(tuple(args), clear, debug)
sniffer_instance.observe_scanner(scanner)
scanner.loop(wait_time) |
def is_suicide_or_check_by_dropping_pawn(self, move):
'''
Checks if the given move would move would leave the king in check or
put it into check.
'''
self.push(move)
is_suicide = self.was_suicide()
is_check_by_dropping_pawn = self.was_check_by_dropping_pawn(move)
self.pop()
return is_suicide or is_check_by_dropping_pawn | def function[is_suicide_or_check_by_dropping_pawn, parameter[self, move]]:
constant[
Checks if the given move would move would leave the king in check or
put it into check.
]
call[name[self].push, parameter[name[move]]]
variable[is_suicide] assign[=] call[name[self].was_suicide, parameter[]]
variable[is_check_by_dropping_pawn] assign[=] call[name[self].was_check_by_dropping_pawn, parameter[name[move]]]
call[name[self].pop, parameter[]]
return[<ast.BoolOp object at 0x7da1b032e170>] | keyword[def] identifier[is_suicide_or_check_by_dropping_pawn] ( identifier[self] , identifier[move] ):
literal[string]
identifier[self] . identifier[push] ( identifier[move] )
identifier[is_suicide] = identifier[self] . identifier[was_suicide] ()
identifier[is_check_by_dropping_pawn] = identifier[self] . identifier[was_check_by_dropping_pawn] ( identifier[move] )
identifier[self] . identifier[pop] ()
keyword[return] identifier[is_suicide] keyword[or] identifier[is_check_by_dropping_pawn] | def is_suicide_or_check_by_dropping_pawn(self, move):
"""
Checks if the given move would move would leave the king in check or
put it into check.
"""
self.push(move)
is_suicide = self.was_suicide()
is_check_by_dropping_pawn = self.was_check_by_dropping_pawn(move)
self.pop()
return is_suicide or is_check_by_dropping_pawn |
def backend_calibration(self, backend='ibmqx4', hub=None, access_token=None, user_id=None):
"""
Get the calibration of a real chip
"""
if access_token:
self.req.credential.set_token(access_token)
if user_id:
self.req.credential.set_user_id(user_id)
if not self.check_credentials():
raise CredentialsError('credentials invalid')
backend_type = self._check_backend(backend, 'calibration')
if not backend_type:
raise BadBackendError(backend)
if backend_type in self.__names_backend_simulator:
ret = {}
return ret
url = get_backend_stats_url(self.config, hub, backend_type)
ret = self.req.get(url + '/calibration')
if not bool(ret):
ret = {}
else:
ret["backend"] = backend_type
return ret | def function[backend_calibration, parameter[self, backend, hub, access_token, user_id]]:
constant[
Get the calibration of a real chip
]
if name[access_token] begin[:]
call[name[self].req.credential.set_token, parameter[name[access_token]]]
if name[user_id] begin[:]
call[name[self].req.credential.set_user_id, parameter[name[user_id]]]
if <ast.UnaryOp object at 0x7da18f00e5f0> begin[:]
<ast.Raise object at 0x7da18f00cdf0>
variable[backend_type] assign[=] call[name[self]._check_backend, parameter[name[backend], constant[calibration]]]
if <ast.UnaryOp object at 0x7da18f00cf70> begin[:]
<ast.Raise object at 0x7da18f00d480>
if compare[name[backend_type] in name[self].__names_backend_simulator] begin[:]
variable[ret] assign[=] dictionary[[], []]
return[name[ret]]
variable[url] assign[=] call[name[get_backend_stats_url], parameter[name[self].config, name[hub], name[backend_type]]]
variable[ret] assign[=] call[name[self].req.get, parameter[binary_operation[name[url] + constant[/calibration]]]]
if <ast.UnaryOp object at 0x7da18f00f1f0> begin[:]
variable[ret] assign[=] dictionary[[], []]
return[name[ret]] | keyword[def] identifier[backend_calibration] ( identifier[self] , identifier[backend] = literal[string] , identifier[hub] = keyword[None] , identifier[access_token] = keyword[None] , identifier[user_id] = keyword[None] ):
literal[string]
keyword[if] identifier[access_token] :
identifier[self] . identifier[req] . identifier[credential] . identifier[set_token] ( identifier[access_token] )
keyword[if] identifier[user_id] :
identifier[self] . identifier[req] . identifier[credential] . identifier[set_user_id] ( identifier[user_id] )
keyword[if] keyword[not] identifier[self] . identifier[check_credentials] ():
keyword[raise] identifier[CredentialsError] ( literal[string] )
identifier[backend_type] = identifier[self] . identifier[_check_backend] ( identifier[backend] , literal[string] )
keyword[if] keyword[not] identifier[backend_type] :
keyword[raise] identifier[BadBackendError] ( identifier[backend] )
keyword[if] identifier[backend_type] keyword[in] identifier[self] . identifier[__names_backend_simulator] :
identifier[ret] ={}
keyword[return] identifier[ret]
identifier[url] = identifier[get_backend_stats_url] ( identifier[self] . identifier[config] , identifier[hub] , identifier[backend_type] )
identifier[ret] = identifier[self] . identifier[req] . identifier[get] ( identifier[url] + literal[string] )
keyword[if] keyword[not] identifier[bool] ( identifier[ret] ):
identifier[ret] ={}
keyword[else] :
identifier[ret] [ literal[string] ]= identifier[backend_type]
keyword[return] identifier[ret] | def backend_calibration(self, backend='ibmqx4', hub=None, access_token=None, user_id=None):
"""
Get the calibration of a real chip
"""
if access_token:
self.req.credential.set_token(access_token) # depends on [control=['if'], data=[]]
if user_id:
self.req.credential.set_user_id(user_id) # depends on [control=['if'], data=[]]
if not self.check_credentials():
raise CredentialsError('credentials invalid') # depends on [control=['if'], data=[]]
backend_type = self._check_backend(backend, 'calibration')
if not backend_type:
raise BadBackendError(backend) # depends on [control=['if'], data=[]]
if backend_type in self.__names_backend_simulator:
ret = {}
return ret # depends on [control=['if'], data=[]]
url = get_backend_stats_url(self.config, hub, backend_type)
ret = self.req.get(url + '/calibration')
if not bool(ret):
ret = {} # depends on [control=['if'], data=[]]
else:
ret['backend'] = backend_type
return ret |
def check_astroid_module(self, ast_node, walker, rawcheckers, tokencheckers):
"""Check a module from its astroid representation."""
try:
tokens = utils.tokenize_module(ast_node)
except tokenize.TokenError as ex:
self.add_message("syntax-error", line=ex.args[1][0], args=ex.args[0])
return None
if not ast_node.pure_python:
self.add_message("raw-checker-failed", args=ast_node.name)
else:
# assert astroid.file.endswith('.py')
# invoke ITokenChecker interface on self to fetch module/block
# level options
self.process_tokens(tokens)
if self._ignore_file:
return False
# walk ast to collect line numbers
self.file_state.collect_block_lines(self.msgs_store, ast_node)
# run raw and tokens checkers
for checker in rawcheckers:
checker.process_module(ast_node)
for checker in tokencheckers:
checker.process_tokens(tokens)
# generate events to astroid checkers
walker.walk(ast_node)
return True | def function[check_astroid_module, parameter[self, ast_node, walker, rawcheckers, tokencheckers]]:
constant[Check a module from its astroid representation.]
<ast.Try object at 0x7da1b0315990>
if <ast.UnaryOp object at 0x7da1b020cfa0> begin[:]
call[name[self].add_message, parameter[constant[raw-checker-failed]]]
call[name[walker].walk, parameter[name[ast_node]]]
return[constant[True]] | keyword[def] identifier[check_astroid_module] ( identifier[self] , identifier[ast_node] , identifier[walker] , identifier[rawcheckers] , identifier[tokencheckers] ):
literal[string]
keyword[try] :
identifier[tokens] = identifier[utils] . identifier[tokenize_module] ( identifier[ast_node] )
keyword[except] identifier[tokenize] . identifier[TokenError] keyword[as] identifier[ex] :
identifier[self] . identifier[add_message] ( literal[string] , identifier[line] = identifier[ex] . identifier[args] [ literal[int] ][ literal[int] ], identifier[args] = identifier[ex] . identifier[args] [ literal[int] ])
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[ast_node] . identifier[pure_python] :
identifier[self] . identifier[add_message] ( literal[string] , identifier[args] = identifier[ast_node] . identifier[name] )
keyword[else] :
identifier[self] . identifier[process_tokens] ( identifier[tokens] )
keyword[if] identifier[self] . identifier[_ignore_file] :
keyword[return] keyword[False]
identifier[self] . identifier[file_state] . identifier[collect_block_lines] ( identifier[self] . identifier[msgs_store] , identifier[ast_node] )
keyword[for] identifier[checker] keyword[in] identifier[rawcheckers] :
identifier[checker] . identifier[process_module] ( identifier[ast_node] )
keyword[for] identifier[checker] keyword[in] identifier[tokencheckers] :
identifier[checker] . identifier[process_tokens] ( identifier[tokens] )
identifier[walker] . identifier[walk] ( identifier[ast_node] )
keyword[return] keyword[True] | def check_astroid_module(self, ast_node, walker, rawcheckers, tokencheckers):
"""Check a module from its astroid representation."""
try:
tokens = utils.tokenize_module(ast_node) # depends on [control=['try'], data=[]]
except tokenize.TokenError as ex:
self.add_message('syntax-error', line=ex.args[1][0], args=ex.args[0])
return None # depends on [control=['except'], data=['ex']]
if not ast_node.pure_python:
self.add_message('raw-checker-failed', args=ast_node.name) # depends on [control=['if'], data=[]]
else:
# assert astroid.file.endswith('.py')
# invoke ITokenChecker interface on self to fetch module/block
# level options
self.process_tokens(tokens)
if self._ignore_file:
return False # depends on [control=['if'], data=[]]
# walk ast to collect line numbers
self.file_state.collect_block_lines(self.msgs_store, ast_node)
# run raw and tokens checkers
for checker in rawcheckers:
checker.process_module(ast_node) # depends on [control=['for'], data=['checker']]
for checker in tokencheckers:
checker.process_tokens(tokens) # depends on [control=['for'], data=['checker']]
# generate events to astroid checkers
walker.walk(ast_node)
return True |
def list(self, **kwargs):
"""
Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList`
"""
return ModelList(
self.ghost.execute_get('%s/' % self._type_name, **kwargs),
self._type_name, self, kwargs, model_type=self._model_type
) | def function[list, parameter[self]]:
constant[
Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList`
]
return[call[name[ModelList], parameter[call[name[self].ghost.execute_get, parameter[binary_operation[constant[%s/] <ast.Mod object at 0x7da2590d6920> name[self]._type_name]]], name[self]._type_name, name[self], name[kwargs]]]] | keyword[def] identifier[list] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[ModelList] (
identifier[self] . identifier[ghost] . identifier[execute_get] ( literal[string] % identifier[self] . identifier[_type_name] ,** identifier[kwargs] ),
identifier[self] . identifier[_type_name] , identifier[self] , identifier[kwargs] , identifier[model_type] = identifier[self] . identifier[_model_type]
) | def list(self, **kwargs):
"""
Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList`
"""
return ModelList(self.ghost.execute_get('%s/' % self._type_name, **kwargs), self._type_name, self, kwargs, model_type=self._model_type) |
def reset(self):
"""
Releases all entities held by this Unit Of Work (i.e., removes state
information from all registered entities and clears the entity map).
"""
for ents in self.__entity_set_map.values():
for ent in ents:
EntityState.release(ent, self)
self.__entity_set_map.clear() | def function[reset, parameter[self]]:
constant[
Releases all entities held by this Unit Of Work (i.e., removes state
information from all registered entities and clears the entity map).
]
for taget[name[ents]] in starred[call[name[self].__entity_set_map.values, parameter[]]] begin[:]
for taget[name[ent]] in starred[name[ents]] begin[:]
call[name[EntityState].release, parameter[name[ent], name[self]]]
call[name[self].__entity_set_map.clear, parameter[]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
keyword[for] identifier[ents] keyword[in] identifier[self] . identifier[__entity_set_map] . identifier[values] ():
keyword[for] identifier[ent] keyword[in] identifier[ents] :
identifier[EntityState] . identifier[release] ( identifier[ent] , identifier[self] )
identifier[self] . identifier[__entity_set_map] . identifier[clear] () | def reset(self):
"""
Releases all entities held by this Unit Of Work (i.e., removes state
information from all registered entities and clears the entity map).
"""
for ents in self.__entity_set_map.values():
for ent in ents:
EntityState.release(ent, self) # depends on [control=['for'], data=['ent']] # depends on [control=['for'], data=['ents']]
self.__entity_set_map.clear() |
def _accumulate_sufficient_statistics(self, stats, X, framelogprob,
posteriors, fwdlattice, bwdlattice):
"""Updates sufficient statistics from a given sample.
Parameters
----------
stats : dict
Sufficient statistics as returned by
:meth:`~base._BaseHMM._initialize_sufficient_statistics`.
X : array, shape (n_samples, n_features)
Sample sequence.
framelogprob : array, shape (n_samples, n_components)
Log-probabilities of each sample under each of the model states.
posteriors : array, shape (n_samples, n_components)
Posterior probabilities of each sample being generated by each
of the model states.
fwdlattice, bwdlattice : array, shape (n_samples, n_components)
Log-forward and log-backward probabilities.
"""
stats['nobs'] += 1
if 's' in self.params:
stats['start'] += posteriors[0]
if 't' in self.params:
n_samples, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_samples <= 1:
return
log_xi_sum = np.full((n_components, n_components), -np.inf)
_hmmc._compute_log_xi_sum(n_samples, n_components, fwdlattice,
log_mask_zero(self.transmat_),
bwdlattice, framelogprob,
log_xi_sum)
with np.errstate(under="ignore"):
stats['trans'] += np.exp(log_xi_sum) | def function[_accumulate_sufficient_statistics, parameter[self, stats, X, framelogprob, posteriors, fwdlattice, bwdlattice]]:
constant[Updates sufficient statistics from a given sample.
Parameters
----------
stats : dict
Sufficient statistics as returned by
:meth:`~base._BaseHMM._initialize_sufficient_statistics`.
X : array, shape (n_samples, n_features)
Sample sequence.
framelogprob : array, shape (n_samples, n_components)
Log-probabilities of each sample under each of the model states.
posteriors : array, shape (n_samples, n_components)
Posterior probabilities of each sample being generated by each
of the model states.
fwdlattice, bwdlattice : array, shape (n_samples, n_components)
Log-forward and log-backward probabilities.
]
<ast.AugAssign object at 0x7da1b1d5f340>
if compare[constant[s] in name[self].params] begin[:]
<ast.AugAssign object at 0x7da1b1d5dcc0>
if compare[constant[t] in name[self].params] begin[:]
<ast.Tuple object at 0x7da1b1d5e9e0> assign[=] name[framelogprob].shape
if compare[name[n_samples] less_or_equal[<=] constant[1]] begin[:]
return[None]
variable[log_xi_sum] assign[=] call[name[np].full, parameter[tuple[[<ast.Name object at 0x7da1b1d5d390>, <ast.Name object at 0x7da1b1d5e380>]], <ast.UnaryOp object at 0x7da1b1d5cd60>]]
call[name[_hmmc]._compute_log_xi_sum, parameter[name[n_samples], name[n_components], name[fwdlattice], call[name[log_mask_zero], parameter[name[self].transmat_]], name[bwdlattice], name[framelogprob], name[log_xi_sum]]]
with call[name[np].errstate, parameter[]] begin[:]
<ast.AugAssign object at 0x7da1b1d5d420> | keyword[def] identifier[_accumulate_sufficient_statistics] ( identifier[self] , identifier[stats] , identifier[X] , identifier[framelogprob] ,
identifier[posteriors] , identifier[fwdlattice] , identifier[bwdlattice] ):
literal[string]
identifier[stats] [ literal[string] ]+= literal[int]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[params] :
identifier[stats] [ literal[string] ]+= identifier[posteriors] [ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[params] :
identifier[n_samples] , identifier[n_components] = identifier[framelogprob] . identifier[shape]
keyword[if] identifier[n_samples] <= literal[int] :
keyword[return]
identifier[log_xi_sum] = identifier[np] . identifier[full] (( identifier[n_components] , identifier[n_components] ),- identifier[np] . identifier[inf] )
identifier[_hmmc] . identifier[_compute_log_xi_sum] ( identifier[n_samples] , identifier[n_components] , identifier[fwdlattice] ,
identifier[log_mask_zero] ( identifier[self] . identifier[transmat_] ),
identifier[bwdlattice] , identifier[framelogprob] ,
identifier[log_xi_sum] )
keyword[with] identifier[np] . identifier[errstate] ( identifier[under] = literal[string] ):
identifier[stats] [ literal[string] ]+= identifier[np] . identifier[exp] ( identifier[log_xi_sum] ) | def _accumulate_sufficient_statistics(self, stats, X, framelogprob, posteriors, fwdlattice, bwdlattice):
"""Updates sufficient statistics from a given sample.
Parameters
----------
stats : dict
Sufficient statistics as returned by
:meth:`~base._BaseHMM._initialize_sufficient_statistics`.
X : array, shape (n_samples, n_features)
Sample sequence.
framelogprob : array, shape (n_samples, n_components)
Log-probabilities of each sample under each of the model states.
posteriors : array, shape (n_samples, n_components)
Posterior probabilities of each sample being generated by each
of the model states.
fwdlattice, bwdlattice : array, shape (n_samples, n_components)
Log-forward and log-backward probabilities.
"""
stats['nobs'] += 1
if 's' in self.params:
stats['start'] += posteriors[0] # depends on [control=['if'], data=[]]
if 't' in self.params:
(n_samples, n_components) = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_samples <= 1:
return # depends on [control=['if'], data=[]]
log_xi_sum = np.full((n_components, n_components), -np.inf)
_hmmc._compute_log_xi_sum(n_samples, n_components, fwdlattice, log_mask_zero(self.transmat_), bwdlattice, framelogprob, log_xi_sum)
with np.errstate(under='ignore'):
stats['trans'] += np.exp(log_xi_sum) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] |
def to_json(self, *, indent=None, sort_keys = False):
"""Gets the object's JSON representation.
Parameters
----------
indent: :class:`int`, optional
Number of spaces used as indentation, ``None`` will return the shortest possible string.
sort_keys: :class:`bool`, optional
Whether keys should be sorted alphabetically or preserve the order defined by the object.
Returns
-------
:class:`str`
JSON representation of the object.
"""
return json.dumps({k: v for k, v in dict(self).items() if v is not None}, indent=indent, sort_keys=sort_keys,
default=self._try_dict) | def function[to_json, parameter[self]]:
constant[Gets the object's JSON representation.
Parameters
----------
indent: :class:`int`, optional
Number of spaces used as indentation, ``None`` will return the shortest possible string.
sort_keys: :class:`bool`, optional
Whether keys should be sorted alphabetically or preserve the order defined by the object.
Returns
-------
:class:`str`
JSON representation of the object.
]
return[call[name[json].dumps, parameter[<ast.DictComp object at 0x7da20e955330>]]] | keyword[def] identifier[to_json] ( identifier[self] ,*, identifier[indent] = keyword[None] , identifier[sort_keys] = keyword[False] ):
literal[string]
keyword[return] identifier[json] . identifier[dumps] ({ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dict] ( identifier[self] ). identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] }, identifier[indent] = identifier[indent] , identifier[sort_keys] = identifier[sort_keys] ,
identifier[default] = identifier[self] . identifier[_try_dict] ) | def to_json(self, *, indent=None, sort_keys=False):
"""Gets the object's JSON representation.
Parameters
----------
indent: :class:`int`, optional
Number of spaces used as indentation, ``None`` will return the shortest possible string.
sort_keys: :class:`bool`, optional
Whether keys should be sorted alphabetically or preserve the order defined by the object.
Returns
-------
:class:`str`
JSON representation of the object.
"""
return json.dumps({k: v for (k, v) in dict(self).items() if v is not None}, indent=indent, sort_keys=sort_keys, default=self._try_dict) |
def gradient(self):
"""Compute the gradient of the energy for all atoms"""
result = np.zeros((self.numc, 3), float)
for index1 in range(self.numc):
result[index1] = self.gradient_component(index1)
return result | def function[gradient, parameter[self]]:
constant[Compute the gradient of the energy for all atoms]
variable[result] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Attribute object at 0x7da20c76fa00>, <ast.Constant object at 0x7da20c76f640>]], name[float]]]
for taget[name[index1]] in starred[call[name[range], parameter[name[self].numc]]] begin[:]
call[name[result]][name[index1]] assign[=] call[name[self].gradient_component, parameter[name[index1]]]
return[name[result]] | keyword[def] identifier[gradient] ( identifier[self] ):
literal[string]
identifier[result] = identifier[np] . identifier[zeros] (( identifier[self] . identifier[numc] , literal[int] ), identifier[float] )
keyword[for] identifier[index1] keyword[in] identifier[range] ( identifier[self] . identifier[numc] ):
identifier[result] [ identifier[index1] ]= identifier[self] . identifier[gradient_component] ( identifier[index1] )
keyword[return] identifier[result] | def gradient(self):
"""Compute the gradient of the energy for all atoms"""
result = np.zeros((self.numc, 3), float)
for index1 in range(self.numc):
result[index1] = self.gradient_component(index1) # depends on [control=['for'], data=['index1']]
return result |
def restrictCheckers(self, allowedMessages):
"""
Unregister useless checkers to speed up twistedchecker.
@param allowedMessages: output messages allowed in twistedchecker
"""
uselessCheckers = self.findUselessCheckers(allowedMessages)
# Unregister these checkers
for checker in uselessCheckers:
self.unregisterChecker(checker) | def function[restrictCheckers, parameter[self, allowedMessages]]:
constant[
Unregister useless checkers to speed up twistedchecker.
@param allowedMessages: output messages allowed in twistedchecker
]
variable[uselessCheckers] assign[=] call[name[self].findUselessCheckers, parameter[name[allowedMessages]]]
for taget[name[checker]] in starred[name[uselessCheckers]] begin[:]
call[name[self].unregisterChecker, parameter[name[checker]]] | keyword[def] identifier[restrictCheckers] ( identifier[self] , identifier[allowedMessages] ):
literal[string]
identifier[uselessCheckers] = identifier[self] . identifier[findUselessCheckers] ( identifier[allowedMessages] )
keyword[for] identifier[checker] keyword[in] identifier[uselessCheckers] :
identifier[self] . identifier[unregisterChecker] ( identifier[checker] ) | def restrictCheckers(self, allowedMessages):
"""
Unregister useless checkers to speed up twistedchecker.
@param allowedMessages: output messages allowed in twistedchecker
"""
uselessCheckers = self.findUselessCheckers(allowedMessages)
# Unregister these checkers
for checker in uselessCheckers:
self.unregisterChecker(checker) # depends on [control=['for'], data=['checker']] |
def get_a_manager(threadPool_settings=None):
""" On first call, creates and returns a @mirte.core.Manager. On
subsequent calls, returns the previously created instance.
If it is the first call, it will initialize the threadPool
with @threadPool_settings. """
global __singleton_manager
if __singleton_manager is None:
def _thread_entry():
if prctl:
prctl.set_name('mirte manager')
m.run()
l.info('manager.run() returned')
l = logging.getLogger('mirte.get_a_manager')
l.info("Creating new instance")
m = Manager(logging.getLogger('mirte'))
if threadPool_settings:
m.update_instance('threadPool', threadPool_settings)
threading.Thread(target=_thread_entry).start()
m.running_event.wait()
__singleton_manager = m
return __singleton_manager | def function[get_a_manager, parameter[threadPool_settings]]:
constant[ On first call, creates and returns a @mirte.core.Manager. On
subsequent calls, returns the previously created instance.
If it is the first call, it will initialize the threadPool
with @threadPool_settings. ]
<ast.Global object at 0x7da1b149db10>
if compare[name[__singleton_manager] is constant[None]] begin[:]
def function[_thread_entry, parameter[]]:
if name[prctl] begin[:]
call[name[prctl].set_name, parameter[constant[mirte manager]]]
call[name[m].run, parameter[]]
call[name[l].info, parameter[constant[manager.run() returned]]]
variable[l] assign[=] call[name[logging].getLogger, parameter[constant[mirte.get_a_manager]]]
call[name[l].info, parameter[constant[Creating new instance]]]
variable[m] assign[=] call[name[Manager], parameter[call[name[logging].getLogger, parameter[constant[mirte]]]]]
if name[threadPool_settings] begin[:]
call[name[m].update_instance, parameter[constant[threadPool], name[threadPool_settings]]]
call[call[name[threading].Thread, parameter[]].start, parameter[]]
call[name[m].running_event.wait, parameter[]]
variable[__singleton_manager] assign[=] name[m]
return[name[__singleton_manager]] | keyword[def] identifier[get_a_manager] ( identifier[threadPool_settings] = keyword[None] ):
literal[string]
keyword[global] identifier[__singleton_manager]
keyword[if] identifier[__singleton_manager] keyword[is] keyword[None] :
keyword[def] identifier[_thread_entry] ():
keyword[if] identifier[prctl] :
identifier[prctl] . identifier[set_name] ( literal[string] )
identifier[m] . identifier[run] ()
identifier[l] . identifier[info] ( literal[string] )
identifier[l] = identifier[logging] . identifier[getLogger] ( literal[string] )
identifier[l] . identifier[info] ( literal[string] )
identifier[m] = identifier[Manager] ( identifier[logging] . identifier[getLogger] ( literal[string] ))
keyword[if] identifier[threadPool_settings] :
identifier[m] . identifier[update_instance] ( literal[string] , identifier[threadPool_settings] )
identifier[threading] . identifier[Thread] ( identifier[target] = identifier[_thread_entry] ). identifier[start] ()
identifier[m] . identifier[running_event] . identifier[wait] ()
identifier[__singleton_manager] = identifier[m]
keyword[return] identifier[__singleton_manager] | def get_a_manager(threadPool_settings=None):
""" On first call, creates and returns a @mirte.core.Manager. On
subsequent calls, returns the previously created instance.
If it is the first call, it will initialize the threadPool
with @threadPool_settings. """
global __singleton_manager
if __singleton_manager is None:
def _thread_entry():
if prctl:
prctl.set_name('mirte manager') # depends on [control=['if'], data=[]]
m.run()
l.info('manager.run() returned')
l = logging.getLogger('mirte.get_a_manager')
l.info('Creating new instance')
m = Manager(logging.getLogger('mirte'))
if threadPool_settings:
m.update_instance('threadPool', threadPool_settings) # depends on [control=['if'], data=[]]
threading.Thread(target=_thread_entry).start()
m.running_event.wait()
__singleton_manager = m # depends on [control=['if'], data=['__singleton_manager']]
return __singleton_manager |
def write_outxy(self,filename):
""" Write out the output(transformed) XY catalog for this image to a file.
"""
f = open(filename,'w')
f.write("#Pixel positions for: "+self.name+'\n')
f.write("#X Y\n")
f.write("#(pix) (pix)\n")
for i in range(self.all_radec[0].shape[0]):
f.write('%f %f\n'%(self.outxy[i,0],self.outxy[i,1]))
f.close() | def function[write_outxy, parameter[self, filename]]:
constant[ Write out the output(transformed) XY catalog for this image to a file.
]
variable[f] assign[=] call[name[open], parameter[name[filename], constant[w]]]
call[name[f].write, parameter[binary_operation[binary_operation[constant[#Pixel positions for: ] + name[self].name] + constant[
]]]]
call[name[f].write, parameter[constant[#X Y
]]]
call[name[f].write, parameter[constant[#(pix) (pix)
]]]
for taget[name[i]] in starred[call[name[range], parameter[call[call[name[self].all_radec][constant[0]].shape][constant[0]]]]] begin[:]
call[name[f].write, parameter[binary_operation[constant[%f %f
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1bad150>, <ast.Subscript object at 0x7da1b1c22050>]]]]]
call[name[f].close, parameter[]] | keyword[def] identifier[write_outxy] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
identifier[f] . identifier[write] ( literal[string] + identifier[self] . identifier[name] + literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[all_radec] [ literal[int] ]. identifier[shape] [ literal[int] ]):
identifier[f] . identifier[write] ( literal[string] %( identifier[self] . identifier[outxy] [ identifier[i] , literal[int] ], identifier[self] . identifier[outxy] [ identifier[i] , literal[int] ]))
identifier[f] . identifier[close] () | def write_outxy(self, filename):
""" Write out the output(transformed) XY catalog for this image to a file.
"""
f = open(filename, 'w')
f.write('#Pixel positions for: ' + self.name + '\n')
f.write('#X Y\n')
f.write('#(pix) (pix)\n')
for i in range(self.all_radec[0].shape[0]):
f.write('%f %f\n' % (self.outxy[i, 0], self.outxy[i, 1])) # depends on [control=['for'], data=['i']]
f.close() |
def _line_2_pair(line):
'''Return bash variable declaration as name-value pair.
Name as lower case str. Value itself only without surrounding '"' (if any).
For example, _line_2_pair('NAME="Ubuntu"') will return ('name', 'Ubuntu')
'''
key, val = line.split('=')
return key.lower(), val.strip('"') | def function[_line_2_pair, parameter[line]]:
constant[Return bash variable declaration as name-value pair.
Name as lower case str. Value itself only without surrounding '"' (if any).
For example, _line_2_pair('NAME="Ubuntu"') will return ('name', 'Ubuntu')
]
<ast.Tuple object at 0x7da1b2538c10> assign[=] call[name[line].split, parameter[constant[=]]]
return[tuple[[<ast.Call object at 0x7da1b2539ed0>, <ast.Call object at 0x7da1b2539b10>]]] | keyword[def] identifier[_line_2_pair] ( identifier[line] ):
literal[string]
identifier[key] , identifier[val] = identifier[line] . identifier[split] ( literal[string] )
keyword[return] identifier[key] . identifier[lower] (), identifier[val] . identifier[strip] ( literal[string] ) | def _line_2_pair(line):
"""Return bash variable declaration as name-value pair.
Name as lower case str. Value itself only without surrounding '"' (if any).
For example, _line_2_pair('NAME="Ubuntu"') will return ('name', 'Ubuntu')
"""
(key, val) = line.split('=')
return (key.lower(), val.strip('"')) |
def _get_notifications_status(self, notifications):
"""
Get the notifications status
"""
if notifications:
size = len(notifications["activeNotifications"])
else:
size = 0
status = self.status_notif if size > 0 else self.status_no_notif
return (size, status) | def function[_get_notifications_status, parameter[self, notifications]]:
constant[
Get the notifications status
]
if name[notifications] begin[:]
variable[size] assign[=] call[name[len], parameter[call[name[notifications]][constant[activeNotifications]]]]
variable[status] assign[=] <ast.IfExp object at 0x7da18bc71ba0>
return[tuple[[<ast.Name object at 0x7da18bc71ff0>, <ast.Name object at 0x7da18bc715a0>]]] | keyword[def] identifier[_get_notifications_status] ( identifier[self] , identifier[notifications] ):
literal[string]
keyword[if] identifier[notifications] :
identifier[size] = identifier[len] ( identifier[notifications] [ literal[string] ])
keyword[else] :
identifier[size] = literal[int]
identifier[status] = identifier[self] . identifier[status_notif] keyword[if] identifier[size] > literal[int] keyword[else] identifier[self] . identifier[status_no_notif]
keyword[return] ( identifier[size] , identifier[status] ) | def _get_notifications_status(self, notifications):
"""
Get the notifications status
"""
if notifications:
size = len(notifications['activeNotifications']) # depends on [control=['if'], data=[]]
else:
size = 0
status = self.status_notif if size > 0 else self.status_no_notif
return (size, status) |
def set_(name, add, match):
'''
Add a value to the named set
USAGE:
.. code-block:: yaml
foo:
reg.set:
- add: bar
- match: my/custom/event
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if name not in __reg__:
__reg__[name] = {}
__reg__[name]['val'] = set()
for event in __events__:
if salt.utils.stringutils.expr_match(event['tag'], match):
try:
val = event['data']['data'].get(add)
except KeyError:
val = event['data'].get(add)
if val is None:
val = 'None'
ret['changes'][add] = val
__reg__[name]['val'].add(val)
return ret | def function[set_, parameter[name, add, match]]:
constant[
Add a value to the named set
USAGE:
.. code-block:: yaml
foo:
reg.set:
- add: bar
- match: my/custom/event
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21ebbe0>, <ast.Constant object at 0x7da1b21eadd0>, <ast.Constant object at 0x7da1b21ebee0>, <ast.Constant object at 0x7da1b21e8fd0>], [<ast.Name object at 0x7da1b21e9690>, <ast.Dict object at 0x7da1b21eb6a0>, <ast.Constant object at 0x7da1b21e8e20>, <ast.Constant object at 0x7da1b21eb910>]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[__reg__]] begin[:]
call[name[__reg__]][name[name]] assign[=] dictionary[[], []]
call[call[name[__reg__]][name[name]]][constant[val]] assign[=] call[name[set], parameter[]]
for taget[name[event]] in starred[name[__events__]] begin[:]
if call[name[salt].utils.stringutils.expr_match, parameter[call[name[event]][constant[tag]], name[match]]] begin[:]
<ast.Try object at 0x7da1b21e8d90>
if compare[name[val] is constant[None]] begin[:]
variable[val] assign[=] constant[None]
call[call[name[ret]][constant[changes]]][name[add]] assign[=] name[val]
call[call[call[name[__reg__]][name[name]]][constant[val]].add, parameter[name[val]]]
return[name[ret]] | keyword[def] identifier[set_] ( identifier[name] , identifier[add] , identifier[match] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : literal[string] ,
literal[string] : keyword[True] }
keyword[if] identifier[name] keyword[not] keyword[in] identifier[__reg__] :
identifier[__reg__] [ identifier[name] ]={}
identifier[__reg__] [ identifier[name] ][ literal[string] ]= identifier[set] ()
keyword[for] identifier[event] keyword[in] identifier[__events__] :
keyword[if] identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[expr_match] ( identifier[event] [ literal[string] ], identifier[match] ):
keyword[try] :
identifier[val] = identifier[event] [ literal[string] ][ literal[string] ]. identifier[get] ( identifier[add] )
keyword[except] identifier[KeyError] :
identifier[val] = identifier[event] [ literal[string] ]. identifier[get] ( identifier[add] )
keyword[if] identifier[val] keyword[is] keyword[None] :
identifier[val] = literal[string]
identifier[ret] [ literal[string] ][ identifier[add] ]= identifier[val]
identifier[__reg__] [ identifier[name] ][ literal[string] ]. identifier[add] ( identifier[val] )
keyword[return] identifier[ret] | def set_(name, add, match):
"""
Add a value to the named set
USAGE:
.. code-block:: yaml
foo:
reg.set:
- add: bar
- match: my/custom/event
"""
ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}
if name not in __reg__:
__reg__[name] = {}
__reg__[name]['val'] = set() # depends on [control=['if'], data=['name', '__reg__']]
for event in __events__:
if salt.utils.stringutils.expr_match(event['tag'], match):
try:
val = event['data']['data'].get(add) # depends on [control=['try'], data=[]]
except KeyError:
val = event['data'].get(add) # depends on [control=['except'], data=[]]
if val is None:
val = 'None' # depends on [control=['if'], data=['val']]
ret['changes'][add] = val
__reg__[name]['val'].add(val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['event']]
return ret |
def sizeOfOverlap(self, e):
    """
    Get the size of the overlap between self and e.
    :return: the number of bases that are shared in common between self and e.
    """
    # Regions that do not intersect share zero bases.
    if not self.intersects(e):
        return 0
    # e lies entirely within self -> the whole of e overlaps.
    if self.start <= e.start and self.end >= e.end:
        return len(e)
    # self lies entirely within e -> the whole of self overlaps.
    if e.start <= self.start and e.end >= self.end:
        return len(self)
    # Partial overlap: e hangs off the right-hand end of self ...
    if self.start < e.start:
        return self.end - e.start
    # ... or off the left-hand end.
    if e.start < self.start:
        return e.end - self.start
constant[
Get the size of the overlap between self and e.
:return: the number of bases that are shared in common between self and e.
]
if <ast.UnaryOp object at 0x7da18f58e350> begin[:]
return[constant[0]]
if <ast.BoolOp object at 0x7da18f58db40> begin[:]
return[call[name[len], parameter[name[e]]]]
if <ast.BoolOp object at 0x7da18f58db70> begin[:]
return[call[name[len], parameter[name[self]]]]
if compare[name[e].start greater[>] name[self].start] begin[:]
return[binary_operation[name[self].end - name[e].start]]
if compare[name[self].start greater[>] name[e].start] begin[:]
return[binary_operation[name[e].end - name[self].start]] | keyword[def] identifier[sizeOfOverlap] ( identifier[self] , identifier[e] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[intersects] ( identifier[e] ):
keyword[return] literal[int]
keyword[if] identifier[e] . identifier[start] >= identifier[self] . identifier[start] keyword[and] identifier[e] . identifier[end] <= identifier[self] . identifier[end] :
keyword[return] identifier[len] ( identifier[e] )
keyword[if] identifier[self] . identifier[start] >= identifier[e] . identifier[start] keyword[and] identifier[self] . identifier[end] <= identifier[e] . identifier[end] :
keyword[return] identifier[len] ( identifier[self] )
keyword[if] identifier[e] . identifier[start] > identifier[self] . identifier[start] :
keyword[return] ( identifier[self] . identifier[end] - identifier[e] . identifier[start] )
keyword[if] identifier[self] . identifier[start] > identifier[e] . identifier[start] :
keyword[return] ( identifier[e] . identifier[end] - identifier[self] . identifier[start] ) | def sizeOfOverlap(self, e):
"""
Get the size of the overlap between self and e.
:return: the number of bases that are shared in common between self and e.
"""
# no overlap
if not self.intersects(e):
return 0 # depends on [control=['if'], data=[]]
# complete inclusion..
if e.start >= self.start and e.end <= self.end:
return len(e) # depends on [control=['if'], data=[]]
if self.start >= e.start and self.end <= e.end:
return len(self) # depends on [control=['if'], data=[]]
# partial overlap
if e.start > self.start:
return self.end - e.start # depends on [control=['if'], data=[]]
if self.start > e.start:
return e.end - self.start # depends on [control=['if'], data=[]] |
def get_new_version(current_version: str, level_bump: str) -> str:
    """
    Compute the next semantic version for the package.
    :param current_version: The version the package has now.
    :param level_bump: Which part of the version to bump: `'major'`,
        `'minor'` or `'patch'`. A falsy value means "no bump".
    :return: The next version number as a string.
    """
    debug('get_new_version("{}", "{}")'.format(current_version, level_bump))
    if not level_bump:
        # Nothing to bump; the version stays as it is.
        return current_version
    # Dispatch to semver.bump_major / bump_minor / bump_patch by name.
    bump = getattr(semver, 'bump_{0}'.format(level_bump))
    return bump(current_version)
constant[
Calculates the next version based on the given bump level with semver.
:param current_version: The version the package has now.
:param level_bump: The level of the version number that should be bumped. Should be a `'major'`,
`'minor'` or `'patch'`.
:return: A string with the next version number.
]
call[name[debug], parameter[call[constant[get_new_version("{}", "{}")].format, parameter[name[current_version], name[level_bump]]]]]
if <ast.UnaryOp object at 0x7da207f01ab0> begin[:]
return[name[current_version]]
return[call[call[name[getattr], parameter[name[semver], call[constant[bump_{0}].format, parameter[name[level_bump]]]]], parameter[name[current_version]]]] | keyword[def] identifier[get_new_version] ( identifier[current_version] : identifier[str] , identifier[level_bump] : identifier[str] )-> identifier[str] :
literal[string]
identifier[debug] ( literal[string] . identifier[format] ( identifier[current_version] , identifier[level_bump] ))
keyword[if] keyword[not] identifier[level_bump] :
keyword[return] identifier[current_version]
keyword[return] identifier[getattr] ( identifier[semver] , literal[string] . identifier[format] ( identifier[level_bump] ))( identifier[current_version] ) | def get_new_version(current_version: str, level_bump: str) -> str:
"""
Calculates the next version based on the given bump level with semver.
:param current_version: The version the package has now.
:param level_bump: The level of the version number that should be bumped. Should be a `'major'`,
`'minor'` or `'patch'`.
:return: A string with the next version number.
"""
debug('get_new_version("{}", "{}")'.format(current_version, level_bump))
if not level_bump:
return current_version # depends on [control=['if'], data=[]]
return getattr(semver, 'bump_{0}'.format(level_bump))(current_version) |
def use(self, kind, name):
    """
    Mark a node name as used.
    Extracts the numerical index from *name*, removes it from the free
    pool for *kind*, and — when the index lies beyond the current
    high-water mark — releases the skipped indices into the free pool and
    advances the mark.
    """
    try:
        params = self._parse(name)
        index = int(params['index'], 10)
    except (KeyError, ValueError):
        # _parse may omit the 'index' key (KeyError) or yield a value that
        # is not a base-10 integer (ValueError); the original code only
        # caught ValueError, so a missing key escaped the warning path.
        log.warning(
            "Cannot extract numerical index"
            " from node name `%s`!", name)
        return
    if index in self._free[kind]:
        self._free[kind].remove(index)
    top = self._top[kind]
    if index > top:
        # Indices skipped over become available for later reuse.
        self._free[kind].update(range(top + 1, index))
        self._top[kind] = index
constant[
Mark a node name as used.
]
<ast.Try object at 0x7da20c9934f0> | keyword[def] identifier[use] ( identifier[self] , identifier[kind] , identifier[name] ):
literal[string]
keyword[try] :
identifier[params] = identifier[self] . identifier[_parse] ( identifier[name] )
identifier[index] = identifier[int] ( identifier[params] [ literal[string] ], literal[int] )
keyword[if] identifier[index] keyword[in] identifier[self] . identifier[_free] [ identifier[kind] ]:
identifier[self] . identifier[_free] [ identifier[kind] ]. identifier[remove] ( identifier[index] )
identifier[top] = identifier[self] . identifier[_top] [ identifier[kind] ]
keyword[if] identifier[index] > identifier[top] :
identifier[self] . identifier[_free] [ identifier[kind] ]. identifier[update] ( identifier[range] ( identifier[top] + literal[int] , identifier[index] ))
identifier[self] . identifier[_top] [ identifier[kind] ]= identifier[index]
keyword[except] identifier[ValueError] :
identifier[log] . identifier[warning] (
literal[string]
literal[string] , identifier[name] ) | def use(self, kind, name):
"""
Mark a node name as used.
"""
try:
params = self._parse(name)
index = int(params['index'], 10)
if index in self._free[kind]:
self._free[kind].remove(index) # depends on [control=['if'], data=['index']]
top = self._top[kind]
if index > top:
self._free[kind].update(range(top + 1, index))
self._top[kind] = index # depends on [control=['if'], data=['index', 'top']] # depends on [control=['try'], data=[]]
except ValueError:
log.warning('Cannot extract numerical index from node name `%s`!', name) # depends on [control=['except'], data=[]] |
def vt_name_check(domain, vt_api):
    """Checks VirusTotal for occurrences of a domain name.
    :param domain: fully-qualified domain name to look up.
    :param vt_api: VirusTotal API key.
    :return: decoded JSON report as a dict, or None when *domain* is not a
        valid FQDN or the response body is not valid JSON.
    """
    if not domain or not is_fqdn(domain):
        return None
    url = 'https://www.virustotal.com/vtapi/v2/domain/report'
    parameters = {'domain': domain, 'apikey': vt_api}
    # A timeout keeps a stalled VirusTotal endpoint from hanging the caller
    # forever; without one, requests waits indefinitely.
    response = requests.get(url, params=parameters, timeout=30)
    try:
        return response.json()
    except ValueError:
        # Body was not JSON (e.g. an HTML error page or an empty response).
        return None
constant[Checks VirusTotal for occurrences of a domain name]
if <ast.UnaryOp object at 0x7da1b28ad240> begin[:]
return[constant[None]]
variable[url] assign[=] constant[https://www.virustotal.com/vtapi/v2/domain/report]
variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da1b28aca00>, <ast.Constant object at 0x7da1b28ae800>], [<ast.Name object at 0x7da1b28ae8c0>, <ast.Name object at 0x7da1b28adc30>]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
<ast.Try object at 0x7da1b28ac5b0> | keyword[def] identifier[vt_name_check] ( identifier[domain] , identifier[vt_api] ):
literal[string]
keyword[if] keyword[not] identifier[is_fqdn] ( identifier[domain] ):
keyword[return] keyword[None]
identifier[url] = literal[string]
identifier[parameters] ={ literal[string] : identifier[domain] , literal[string] : identifier[vt_api] }
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[params] = identifier[parameters] )
keyword[try] :
keyword[return] identifier[response] . identifier[json] ()
keyword[except] identifier[ValueError] :
keyword[return] keyword[None] | def vt_name_check(domain, vt_api):
"""Checks VirusTotal for occurrences of a domain name"""
if not is_fqdn(domain):
return None # depends on [control=['if'], data=[]]
url = 'https://www.virustotal.com/vtapi/v2/domain/report'
parameters = {'domain': domain, 'apikey': vt_api}
response = requests.get(url, params=parameters)
try:
return response.json() # depends on [control=['try'], data=[]]
except ValueError:
return None # depends on [control=['except'], data=[]] |
def create_parser(default_name: str) -> argparse.ArgumentParser:
    """
    Build the default brewblox_service ArgumentParser.
    Only service-agnostic options are registered here; callers may attach
    additional arguments to the returned parser before passing it to
    create_app().
    Args:
        default_name (str):
            Value used for the --name commandline argument when the user
            does not supply one.
    Returns:
        argparse.ArgumentParser: a parser pre-loaded with the shared options.
    """
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    add = parser.add_argument
    add('-H', '--host',
        default='0.0.0.0',
        help='Host to which the app binds. [%(default)s]')
    add('-p', '--port',
        default=5000,
        type=int,
        help='Port to which the app binds. [%(default)s]')
    add('-o', '--output',
        help='Logging output. [%(default)s]')
    add('-n', '--name',
        default=default_name,
        help='Service name. This will be used as prefix for all endpoints. [%(default)s]')
    add('--debug',
        action='store_true',
        help='Run the app in debug mode. [%(default)s]')
    add('--eventbus-host',
        default='eventbus',
        help='Hostname at which the eventbus can be reached [%(default)s]')
    add('--eventbus-port',
        default=5672,
        type=int,
        help='Port at which the eventbus can be reached [%(default)s]')
    return parser
literal[string]
identifier[argparser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[fromfile_prefix_chars] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = literal[int] ,
identifier[type] = identifier[int] )
identifier[argparser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[help] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = identifier[default_name] )
identifier[argparser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] ,
identifier[action] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = literal[string] )
identifier[argparser] . identifier[add_argument] ( literal[string] ,
identifier[help] = literal[string] ,
identifier[default] = literal[int] ,
identifier[type] = identifier[int] )
keyword[return] identifier[argparser] | def create_parser(default_name: str) -> argparse.ArgumentParser:
"""
Creates the default brewblox_service ArgumentParser.
Service-agnostic arguments are added.
The parser allows calling code to add additional arguments before using it in create_app()
Args:
default_name (str):
default value for the --name commandline argument.
Returns:
argparse.ArgumentParser: a Python ArgumentParser with defaults set.
"""
argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')
argparser.add_argument('-H', '--host', help='Host to which the app binds. [%(default)s]', default='0.0.0.0')
argparser.add_argument('-p', '--port', help='Port to which the app binds. [%(default)s]', default=5000, type=int)
argparser.add_argument('-o', '--output', help='Logging output. [%(default)s]')
argparser.add_argument('-n', '--name', help='Service name. This will be used as prefix for all endpoints. [%(default)s]', default=default_name)
argparser.add_argument('--debug', help='Run the app in debug mode. [%(default)s]', action='store_true')
argparser.add_argument('--eventbus-host', help='Hostname at which the eventbus can be reached [%(default)s]', default='eventbus')
argparser.add_argument('--eventbus-port', help='Port at which the eventbus can be reached [%(default)s]', default=5672, type=int)
return argparser |
def subvol_create(self, path):
    """
    Create a btrfs subvolume in the specified path
    :param path: path to create
    """
    arguments = {'path': path}
    # Validate the argument payload before issuing the command.
    self._subvol_chk.check(arguments)
    self._client.sync('btrfs.subvol_create', arguments)
constant[
Create a btrfs subvolume in the specified path
:param path: path to create
]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b04db610>], [<ast.Name object at 0x7da1b04dbe20>]]
call[name[self]._subvol_chk.check, parameter[name[args]]]
call[name[self]._client.sync, parameter[constant[btrfs.subvol_create], name[args]]] | keyword[def] identifier[subvol_create] ( identifier[self] , identifier[path] ):
literal[string]
identifier[args] ={
literal[string] : identifier[path]
}
identifier[self] . identifier[_subvol_chk] . identifier[check] ( identifier[args] )
identifier[self] . identifier[_client] . identifier[sync] ( literal[string] , identifier[args] ) | def subvol_create(self, path):
"""
Create a btrfs subvolume in the specified path
:param path: path to create
"""
args = {'path': path}
self._subvol_chk.check(args)
self._client.sync('btrfs.subvol_create', args) |
def consoleFormat(self, sectionTitle, lst, fmt_spec):
    '''
    Helper method for :func:`~exhale.graph.ExhaleRoot.toConsole`. Prints the given
    ``sectionTitle`` and calls :func:`~exhale.graph.ExhaleNode.toConsole` with ``0``
    as the level for every ExhaleNode in ``lst``.
    **Parameters**
    ``sectionTitle`` (str)
        The title that will be printed with some visual separators around it.
    ``lst`` (list)
        The list of ExhaleNodes to print to the console.
    '''
    # Nothing is emitted unless a verbose build was requested.
    if not configs.verboseBuild:
        return
    banner = textwrap.dedent('''
        ###########################################################
        ## {0}
        ###########################################################'''.format(sectionTitle))
    utils.verbose_log(banner)
    for node in lst:
        node.toConsole(0, fmt_spec)
constant[
Helper method for :func:`~exhale.graph.ExhaleRoot.toConsole`. Prints the given
``sectionTitle`` and calls :func:`~exhale.graph.ExhaleNode.toConsole` with ``0``
as the level for every ExhaleNode in ``lst``.
**Parameters**
``sectionTitle`` (str)
The title that will be printed with some visual separators around it.
``lst`` (list)
The list of ExhaleNodes to print to the console.
]
if <ast.UnaryOp object at 0x7da1b0782290> begin[:]
return[None]
call[name[utils].verbose_log, parameter[call[name[textwrap].dedent, parameter[call[constant[
###########################################################
## {0}
###########################################################].format, parameter[name[sectionTitle]]]]]]]
for taget[name[l]] in starred[name[lst]] begin[:]
call[name[l].toConsole, parameter[constant[0], name[fmt_spec]]] | keyword[def] identifier[consoleFormat] ( identifier[self] , identifier[sectionTitle] , identifier[lst] , identifier[fmt_spec] ):
literal[string]
keyword[if] keyword[not] identifier[configs] . identifier[verboseBuild] :
keyword[return]
identifier[utils] . identifier[verbose_log] ( identifier[textwrap] . identifier[dedent] ( literal[string] . identifier[format] ( identifier[sectionTitle] )))
keyword[for] identifier[l] keyword[in] identifier[lst] :
identifier[l] . identifier[toConsole] ( literal[int] , identifier[fmt_spec] ) | def consoleFormat(self, sectionTitle, lst, fmt_spec):
"""
Helper method for :func:`~exhale.graph.ExhaleRoot.toConsole`. Prints the given
``sectionTitle`` and calls :func:`~exhale.graph.ExhaleNode.toConsole` with ``0``
as the level for every ExhaleNode in ``lst``.
**Parameters**
``sectionTitle`` (str)
The title that will be printed with some visual separators around it.
``lst`` (list)
The list of ExhaleNodes to print to the console.
"""
if not configs.verboseBuild:
return # depends on [control=['if'], data=[]]
utils.verbose_log(textwrap.dedent('\n ###########################################################\n ## {0}\n ###########################################################'.format(sectionTitle)))
for l in lst:
l.toConsole(0, fmt_spec) # depends on [control=['for'], data=['l']] |
def _sorted_dispatch_curve(network, units, ax, label, color):
    """Plot the descending duration curve of the aggregated dispatch of
    *units*, normalised by their total optimised power rating."""
    (network.storage_units_t.p[units].sum(axis=1).sort_values(
        ascending=False).reset_index() /
        network.storage_units.p_nom_opt[units].sum())[0].plot(
            ax=ax, label=label, color=color)
def storage_soc_sorted(network, filename=None):
    """
    Plots the soc (state-of-charge) of extendable storages
    Parameters
    ----------
    network : PyPSA network container
        Holds topology of grid including results from powerflow analysis
    filename : path to folder
    """
    # Classify extendable storage units by their energy/power ratio:
    # max_hours == 6 -> battery-like, max_hours == 168 -> hydrogen-like.
    sbatt = network.storage_units.index[
        (network.storage_units.p_nom_opt > 1) &
        (network.storage_units.capital_cost > 10) &
        (network.storage_units.max_hours == 6)]
    shydr = network.storage_units.index[
        (network.storage_units.p_nom_opt > 1) &
        (network.storage_units.capital_cost > 10) &
        (network.storage_units.max_hours == 168)]
    # NOTE(review): the original also computed total installed capacities
    # (max_hours * p_nom_opt summed) but never used them; dropped here.
    batt_p = network.storage_units.p_nom_opt[sbatt].sum()
    hydr_p = network.storage_units.p_nom_opt[shydr].sum()
    fig, ax = plt.subplots(1, 1)
    # Thresholds deliberately leave the exact value 1 to the "plot both"
    # branch, matching the original chain of comparisons.
    if batt_p < 1 and hydr_p < 1:
        print("No storage unit to plot")
    elif batt_p > 1 and hydr_p < 1:
        _sorted_dispatch_curve(network, sbatt, ax, "Battery storage", 'orangered')
    elif batt_p < 1 and hydr_p > 1:
        _sorted_dispatch_curve(network, shydr, ax, "Hydrogen storage", 'teal')
    else:
        _sorted_dispatch_curve(network, sbatt, ax, "Battery storage", 'orangered')
        _sorted_dispatch_curve(network, shydr, ax, "Hydrogen storage", 'teal')
    ax.set_xlabel("")
    ax.set_ylabel("Storage dispatch in p.u. \n <- charge - discharge ->")
    ax.set_ylim([-1.05, 1.05])
    ax.legend()
    ax.set_title("Sorted duration curve of storage dispatch")
    if filename is None:
        plt.show()
    else:
        # NOTE(review): matplotlib's savefig() has no ``figsize`` keyword;
        # the original passed one, which is ignored at best and rejected by
        # newer matplotlib versions — removed here.
        plt.savefig(filename, bbox_inches='tight')
        plt.close()
    return
constant[
Plots the soc (state-pf-charge) of extendable storages
Parameters
----------
network : PyPSA network container
Holds topology of grid including results from powerflow analysis
filename : path to folder
]
variable[sbatt] assign[=] call[name[network].storage_units.index][binary_operation[binary_operation[compare[name[network].storage_units.p_nom_opt greater[>] constant[1]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[network].storage_units.capital_cost greater[>] constant[10]]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[network].storage_units.max_hours equal[==] constant[6]]]]
variable[shydr] assign[=] call[name[network].storage_units.index][binary_operation[binary_operation[compare[name[network].storage_units.p_nom_opt greater[>] constant[1]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[network].storage_units.capital_cost greater[>] constant[10]]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[network].storage_units.max_hours equal[==] constant[168]]]]
variable[cap_batt] assign[=] call[binary_operation[call[name[network].storage_units.max_hours][name[sbatt]] * call[name[network].storage_units.p_nom_opt][name[sbatt]]].sum, parameter[]]
variable[cap_hydr] assign[=] call[binary_operation[call[name[network].storage_units.max_hours][name[shydr]] * call[name[network].storage_units.p_nom_opt][name[shydr]]].sum, parameter[]]
<ast.Tuple object at 0x7da1b1b58f10> assign[=] call[name[plt].subplots, parameter[constant[1], constant[1]]]
if <ast.BoolOp object at 0x7da1b1b5a830> begin[:]
call[name[print], parameter[constant[No storage unit to plot]]]
call[name[ax].set_xlabel, parameter[constant[]]]
call[name[ax].set_ylabel, parameter[constant[Storage dispatch in p.u.
<- charge - discharge ->]]]
call[name[ax].set_ylim, parameter[list[[<ast.UnaryOp object at 0x7da1b1b58760>, <ast.Constant object at 0x7da1b1b587c0>]]]]
call[name[ax].legend, parameter[]]
call[name[ax].set_title, parameter[constant[Sorted duration curve of storage dispatch]]]
if compare[name[filename] is constant[None]] begin[:]
call[name[plt].show, parameter[]]
return[None] | keyword[def] identifier[storage_soc_sorted] ( identifier[network] , identifier[filename] = keyword[None] ):
literal[string]
identifier[sbatt] = identifier[network] . identifier[storage_units] . identifier[index] [( identifier[network] . identifier[storage_units] . identifier[p_nom_opt] > literal[int] )&
( identifier[network] . identifier[storage_units] . identifier[capital_cost] > literal[int] )&
( identifier[network] . identifier[storage_units] . identifier[max_hours] == literal[int] )]
identifier[shydr] = identifier[network] . identifier[storage_units] . identifier[index] [( identifier[network] . identifier[storage_units] . identifier[p_nom_opt] > literal[int] )&
( identifier[network] . identifier[storage_units] . identifier[capital_cost] > literal[int] )
&( identifier[network] . identifier[storage_units] . identifier[max_hours] == literal[int] )]
identifier[cap_batt] =( identifier[network] . identifier[storage_units] . identifier[max_hours] [ identifier[sbatt] ]*
identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[sbatt] ]). identifier[sum] ()
identifier[cap_hydr] =( identifier[network] . identifier[storage_units] . identifier[max_hours] [ identifier[shydr] ]*
identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[shydr] ]). identifier[sum] ()
identifier[fig] , identifier[ax] = identifier[plt] . identifier[subplots] ( literal[int] , literal[int] )
keyword[if] identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[sbatt] ]. identifier[sum] ()< literal[int] keyword[and] identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[shydr] ]. identifier[sum] ()< literal[int] :
identifier[print] ( literal[string] )
keyword[elif] identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[sbatt] ]. identifier[sum] ()> literal[int] keyword[and] identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[shydr] ]. identifier[sum] ()< literal[int] :
( identifier[network] . identifier[storage_units_t] . identifier[p] [ identifier[sbatt] ]. identifier[sum] ( identifier[axis] = literal[int] ). identifier[sort_values] (
identifier[ascending] = keyword[False] ). identifier[reset_index] ()/ identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[sbatt] ]. identifier[sum] ())[ literal[int] ]. identifier[plot] (
identifier[ax] = identifier[ax] , identifier[label] = literal[string] , identifier[color] = literal[string] )
keyword[elif] identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[sbatt] ]. identifier[sum] ()< literal[int] keyword[and] identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[shydr] ]. identifier[sum] ()> literal[int] :
( identifier[network] . identifier[storage_units_t] . identifier[p] [ identifier[shydr] ]. identifier[sum] ( identifier[axis] = literal[int] ). identifier[sort_values] (
identifier[ascending] = keyword[False] ). identifier[reset_index] ()/ identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[shydr] ]. identifier[sum] ())[ literal[int] ]. identifier[plot] (
identifier[ax] = identifier[ax] , identifier[label] = literal[string] , identifier[color] = literal[string] )
keyword[else] :
( identifier[network] . identifier[storage_units_t] . identifier[p] [ identifier[sbatt] ]. identifier[sum] ( identifier[axis] = literal[int] ). identifier[sort_values] (
identifier[ascending] = keyword[False] ). identifier[reset_index] ()/ identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[sbatt] ]. identifier[sum] ())[ literal[int] ]. identifier[plot] (
identifier[ax] = identifier[ax] , identifier[label] = literal[string] , identifier[color] = literal[string] )
( identifier[network] . identifier[storage_units_t] . identifier[p] [ identifier[shydr] ]. identifier[sum] ( identifier[axis] = literal[int] ). identifier[sort_values] (
identifier[ascending] = keyword[False] ). identifier[reset_index] ()/ identifier[network] . identifier[storage_units] . identifier[p_nom_opt] [ identifier[shydr] ]. identifier[sum] ())[ literal[int] ]. identifier[plot] (
identifier[ax] = identifier[ax] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_ylim] ([- literal[int] , literal[int] ])
identifier[ax] . identifier[legend] ()
identifier[ax] . identifier[set_title] ( literal[string] )
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[plt] . identifier[show] ()
keyword[else] :
identifier[plt] . identifier[savefig] ( identifier[filename] , identifier[figsize] =( literal[int] , literal[int] ), identifier[bbox_inches] = literal[string] )
identifier[plt] . identifier[close] ()
keyword[return] | def storage_soc_sorted(network, filename=None):
"""
Plots the soc (state-pf-charge) of extendable storages
Parameters
----------
network : PyPSA network container
Holds topology of grid including results from powerflow analysis
filename : path to folder
"""
sbatt = network.storage_units.index[(network.storage_units.p_nom_opt > 1) & (network.storage_units.capital_cost > 10) & (network.storage_units.max_hours == 6)]
shydr = network.storage_units.index[(network.storage_units.p_nom_opt > 1) & (network.storage_units.capital_cost > 10) & (network.storage_units.max_hours == 168)]
cap_batt = (network.storage_units.max_hours[sbatt] * network.storage_units.p_nom_opt[sbatt]).sum()
cap_hydr = (network.storage_units.max_hours[shydr] * network.storage_units.p_nom_opt[shydr]).sum()
(fig, ax) = plt.subplots(1, 1)
if network.storage_units.p_nom_opt[sbatt].sum() < 1 and network.storage_units.p_nom_opt[shydr].sum() < 1:
print('No storage unit to plot') # depends on [control=['if'], data=[]]
elif network.storage_units.p_nom_opt[sbatt].sum() > 1 and network.storage_units.p_nom_opt[shydr].sum() < 1:
(network.storage_units_t.p[sbatt].sum(axis=1).sort_values(ascending=False).reset_index() / network.storage_units.p_nom_opt[sbatt].sum())[0].plot(ax=ax, label='Battery storage', color='orangered') # depends on [control=['if'], data=[]]
elif network.storage_units.p_nom_opt[sbatt].sum() < 1 and network.storage_units.p_nom_opt[shydr].sum() > 1:
(network.storage_units_t.p[shydr].sum(axis=1).sort_values(ascending=False).reset_index() / network.storage_units.p_nom_opt[shydr].sum())[0].plot(ax=ax, label='Hydrogen storage', color='teal') # depends on [control=['if'], data=[]]
else:
(network.storage_units_t.p[sbatt].sum(axis=1).sort_values(ascending=False).reset_index() / network.storage_units.p_nom_opt[sbatt].sum())[0].plot(ax=ax, label='Battery storage', color='orangered')
(network.storage_units_t.p[shydr].sum(axis=1).sort_values(ascending=False).reset_index() / network.storage_units.p_nom_opt[shydr].sum())[0].plot(ax=ax, label='Hydrogen storage', color='teal')
ax.set_xlabel('')
ax.set_ylabel('Storage dispatch in p.u. \n <- charge - discharge ->')
ax.set_ylim([-1.05, 1.05])
ax.legend()
ax.set_title('Sorted duration curve of storage dispatch')
if filename is None:
plt.show() # depends on [control=['if'], data=[]]
else:
plt.savefig(filename, figsize=(3, 4), bbox_inches='tight')
plt.close()
return |
def remove(self, resource):
    """Removes a resource from the context"""
    # Silently ignore anything that is not a Resource instance.
    if not isinstance(resource, Resource):
        return
    self._resources.remove(resource)
constant[Removes a resource from the context]
if call[name[isinstance], parameter[name[resource], name[Resource]]] begin[:]
call[name[self]._resources.remove, parameter[name[resource]]] | keyword[def] identifier[remove] ( identifier[self] , identifier[resource] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[resource] , identifier[Resource] ):
identifier[self] . identifier[_resources] . identifier[remove] ( identifier[resource] ) | def remove(self, resource):
"""Removes a resource from the context"""
if isinstance(resource, Resource):
self._resources.remove(resource) # depends on [control=['if'], data=[]] |
def _args2_fpath(dpath, fname, cfgstr, ext):
    r"""
    Build a cache file path, condensing ``cfgstr`` so the resulting file
    name stays within filesystem limits.
    Internal util_cache helper function.
    Windows MAX_PATH=260 characters; absolute paths are limited to
    32,000 characters and each filename component to 255 characters.
    Args:
        dpath (str): directory the cache file lives in
        fname (str): prefix for the cache file name
        cfgstr (str): configuration string (hashed when too long)
        ext (str): file extension, must start with a dot
    Returns:
        str: fpath
    CommandLine:
        python -m utool.util_cache --test-_args2_fpath
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_cache import * # NOQA
        >>> from utool.util_cache import _args2_fpath
        >>> import utool as ut
        >>> dpath = 'F:\\data\\work\\PZ_MTEST\\_ibsdb\\_ibeis_cache'
        >>> fname = 'normalizer_'
        >>> cfgstr = u'PZ_MTEST_DSUUIDS((9)67j%dr%&bl%4oh4+)_QSUUIDS((9)67j%dr%&bl%4oh4+)zebra_plains_vsone_NN(single,K1+1,last,cks1024)_FILT(ratio<0.625;1.0,fg;1.0)_SV(0.01;2;1.57minIn=4,nRR=50,nsum,)_AGG(nsum)_FLANN(4_kdtrees)_FEATWEIGHT(ON,uselabel,rf)_FEAT(hesaff+sift_)_CHIP(sz450)'
        >>> ext = '.cPkl'
        >>> fpath = _args2_fpath(dpath, fname, cfgstr, ext)
        >>> result = str(ut.ensure_unixslash(fpath))
        >>> target = 'F:/data/work/PZ_MTEST/_ibsdb/_ibeis_cache/normalizer_xfylfboirymmcpfg.cPkl'
        >>> ut.assert_eq(result, target)
    """
    if ext and not ext.startswith('.'):
        raise ValueError('Please be explicit and use a dot in ext')
    # Condense the (possibly very long) cfgstr down to a short hashed
    # suffix so the final component stays well under 255 characters.
    condensed = consensed_cfgstr(fname, cfgstr, max_len=128,
                                 cfgstr_hashlen=16)
    return normpath(join(dpath, condensed + ext))
constant[
Ensures that the filename is not too long
Internal util_cache helper function
Windows MAX_PATH=260 characters
Absolute length is limited to 32,000 characters
Each filename component is limited to 255 characters
Args:
dpath (str):
fname (str):
cfgstr (str):
ext (str):
Returns:
str: fpath
CommandLine:
python -m utool.util_cache --test-_args2_fpath
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> from utool.util_cache import _args2_fpath
>>> import utool as ut
>>> dpath = 'F:\\data\\work\\PZ_MTEST\\_ibsdb\\_ibeis_cache'
>>> fname = 'normalizer_'
>>> cfgstr = u'PZ_MTEST_DSUUIDS((9)67j%dr%&bl%4oh4+)_QSUUIDS((9)67j%dr%&bl%4oh4+)zebra_plains_vsone_NN(single,K1+1,last,cks1024)_FILT(ratio<0.625;1.0,fg;1.0)_SV(0.01;2;1.57minIn=4,nRR=50,nsum,)_AGG(nsum)_FLANN(4_kdtrees)_FEATWEIGHT(ON,uselabel,rf)_FEAT(hesaff+sift_)_CHIP(sz450)'
>>> ext = '.cPkl'
>>> fpath = _args2_fpath(dpath, fname, cfgstr, ext)
>>> result = str(ut.ensure_unixslash(fpath))
>>> target = 'F:/data/work/PZ_MTEST/_ibsdb/_ibeis_cache/normalizer_xfylfboirymmcpfg.cPkl'
>>> ut.assert_eq(result, target)
]
if <ast.BoolOp object at 0x7da1b24af910> begin[:]
<ast.Raise object at 0x7da1b24af640>
variable[max_len] assign[=] constant[128]
variable[cfgstr_hashlen] assign[=] constant[16]
variable[prefix] assign[=] name[fname]
variable[fname_cfgstr] assign[=] call[name[consensed_cfgstr], parameter[name[prefix], name[cfgstr]]]
variable[fpath] assign[=] call[name[join], parameter[name[dpath], binary_operation[name[fname_cfgstr] + name[ext]]]]
variable[fpath] assign[=] call[name[normpath], parameter[name[fpath]]]
return[name[fpath]] | keyword[def] identifier[_args2_fpath] ( identifier[dpath] , identifier[fname] , identifier[cfgstr] , identifier[ext] ):
literal[string]
keyword[if] identifier[len] ( identifier[ext] )> literal[int] keyword[and] identifier[ext] [ literal[int] ]!= literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[max_len] = literal[int]
identifier[cfgstr_hashlen] = literal[int]
identifier[prefix] = identifier[fname]
identifier[fname_cfgstr] = identifier[consensed_cfgstr] ( identifier[prefix] , identifier[cfgstr] , identifier[max_len] = identifier[max_len] ,
identifier[cfgstr_hashlen] = identifier[cfgstr_hashlen] )
identifier[fpath] = identifier[join] ( identifier[dpath] , identifier[fname_cfgstr] + identifier[ext] )
identifier[fpath] = identifier[normpath] ( identifier[fpath] )
keyword[return] identifier[fpath] | def _args2_fpath(dpath, fname, cfgstr, ext):
"""
Ensures that the filename is not too long
Internal util_cache helper function
Windows MAX_PATH=260 characters
Absolute length is limited to 32,000 characters
Each filename component is limited to 255 characters
Args:
dpath (str):
fname (str):
cfgstr (str):
ext (str):
Returns:
str: fpath
CommandLine:
python -m utool.util_cache --test-_args2_fpath
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> from utool.util_cache import _args2_fpath
>>> import utool as ut
>>> dpath = 'F:\\\\data\\\\work\\\\PZ_MTEST\\\\_ibsdb\\\\_ibeis_cache'
>>> fname = 'normalizer_'
>>> cfgstr = u'PZ_MTEST_DSUUIDS((9)67j%dr%&bl%4oh4+)_QSUUIDS((9)67j%dr%&bl%4oh4+)zebra_plains_vsone_NN(single,K1+1,last,cks1024)_FILT(ratio<0.625;1.0,fg;1.0)_SV(0.01;2;1.57minIn=4,nRR=50,nsum,)_AGG(nsum)_FLANN(4_kdtrees)_FEATWEIGHT(ON,uselabel,rf)_FEAT(hesaff+sift_)_CHIP(sz450)'
>>> ext = '.cPkl'
>>> fpath = _args2_fpath(dpath, fname, cfgstr, ext)
>>> result = str(ut.ensure_unixslash(fpath))
>>> target = 'F:/data/work/PZ_MTEST/_ibsdb/_ibeis_cache/normalizer_xfylfboirymmcpfg.cPkl'
>>> ut.assert_eq(result, target)
"""
if len(ext) > 0 and ext[0] != '.':
raise ValueError('Please be explicit and use a dot in ext') # depends on [control=['if'], data=[]]
max_len = 128
# should hashlen be larger?
cfgstr_hashlen = 16
prefix = fname
fname_cfgstr = consensed_cfgstr(prefix, cfgstr, max_len=max_len, cfgstr_hashlen=cfgstr_hashlen)
fpath = join(dpath, fname_cfgstr + ext)
fpath = normpath(fpath)
return fpath |
def has_changed(self):
    """
    Check whether the remote image was modified since it was last
    downloaded.  A HEAD request is issued so only the headers are
    transferred, which is much quicker than downloading and
    processing the whole file.
    """
    head_request = urllib_request.Request(self.url)
    head_request.get_method = lambda: 'HEAD'
    headers = urllib_request.urlopen(head_request).info()
    if 'Last-Modified' not in headers:
        # No last-modified header to compare against; assume changed.
        return True
    last_modified = headers['Last-Modified']
    if last_modified == self.image_last_modified:
        # Timestamp matches the one seen previously: not modified.
        return False
    # Remember the new timestamp and report that the image changed.
    self.image_last_modified = last_modified
    return True
constant[
Method to check if an image has changed
since it was last downloaded. By making
a head request, this check can be done
quicker that downloading and processing
the whole file.
]
variable[request] assign[=] call[name[urllib_request].Request, parameter[name[self].url]]
name[request].get_method assign[=] <ast.Lambda object at 0x7da20e954b20>
variable[response] assign[=] call[name[urllib_request].urlopen, parameter[name[request]]]
variable[information] assign[=] call[name[response].info, parameter[]]
if compare[constant[Last-Modified] in name[information]] begin[:]
variable[last_modified] assign[=] call[name[information]][constant[Last-Modified]]
if compare[name[last_modified] equal[==] name[self].image_last_modified] begin[:]
return[constant[False]]
name[self].image_last_modified assign[=] name[last_modified]
return[constant[True]] | keyword[def] identifier[has_changed] ( identifier[self] ):
literal[string]
identifier[request] = identifier[urllib_request] . identifier[Request] ( identifier[self] . identifier[url] )
identifier[request] . identifier[get_method] = keyword[lambda] : literal[string]
identifier[response] = identifier[urllib_request] . identifier[urlopen] ( identifier[request] )
identifier[information] = identifier[response] . identifier[info] ()
keyword[if] literal[string] keyword[in] identifier[information] :
identifier[last_modified] = identifier[information] [ literal[string] ]
keyword[if] identifier[last_modified] == identifier[self] . identifier[image_last_modified] :
keyword[return] keyword[False]
identifier[self] . identifier[image_last_modified] = identifier[last_modified]
keyword[return] keyword[True] | def has_changed(self):
"""
Method to check if an image has changed
since it was last downloaded. By making
a head request, this check can be done
quicker that downloading and processing
the whole file.
"""
request = urllib_request.Request(self.url)
request.get_method = lambda : 'HEAD'
response = urllib_request.urlopen(request)
information = response.info()
if 'Last-Modified' in information:
last_modified = information['Last-Modified']
# Return False if the image has not been modified
if last_modified == self.image_last_modified:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['information']]
self.image_last_modified = last_modified
# Return True if the image has been modified
# or if the image has no last-modified header
return True |
def _parse_tokens(tokens):
    """Parse the tokens.
    This converts the tokens into a form where we can manipulate them
    more easily.  Returns None when a bracketed container cannot be
    parsed.
    """
    parsed = []
    pos = 0
    end = len(tokens)
    while pos < end:
        current = Token(*tokens[pos])
        assert current.token_type != token.INDENT
        if current.token_type == tokenize.NEWLINE:
            # There's only one newline and it's at the end.
            break
        if current.token_string in '([{':
            # Consume the whole bracketed container in one step.
            (container, pos) = _parse_container(tokens, pos)
            if not container:
                return None
            parsed.append(container)
        else:
            parsed.append(Atom(current))
        pos += 1
    return parsed
constant[Parse the tokens.
This converts the tokens into a form where we can manipulate them
more easily.
]
variable[index] assign[=] constant[0]
variable[parsed_tokens] assign[=] list[[]]
variable[num_tokens] assign[=] call[name[len], parameter[name[tokens]]]
while compare[name[index] less[<] name[num_tokens]] begin[:]
variable[tok] assign[=] call[name[Token], parameter[<ast.Starred object at 0x7da18dc996c0>]]
assert[compare[name[tok].token_type not_equal[!=] name[token].INDENT]]
if compare[name[tok].token_type equal[==] name[tokenize].NEWLINE] begin[:]
break
if compare[name[tok].token_string in constant[([{]] begin[:]
<ast.Tuple object at 0x7da18dc99210> assign[=] call[name[_parse_container], parameter[name[tokens], name[index]]]
if <ast.UnaryOp object at 0x7da1b1b131c0> begin[:]
return[constant[None]]
call[name[parsed_tokens].append, parameter[name[container]]]
<ast.AugAssign object at 0x7da1b1b10df0>
return[name[parsed_tokens]] | keyword[def] identifier[_parse_tokens] ( identifier[tokens] ):
literal[string]
identifier[index] = literal[int]
identifier[parsed_tokens] =[]
identifier[num_tokens] = identifier[len] ( identifier[tokens] )
keyword[while] identifier[index] < identifier[num_tokens] :
identifier[tok] = identifier[Token] (* identifier[tokens] [ identifier[index] ])
keyword[assert] identifier[tok] . identifier[token_type] != identifier[token] . identifier[INDENT]
keyword[if] identifier[tok] . identifier[token_type] == identifier[tokenize] . identifier[NEWLINE] :
keyword[break]
keyword[if] identifier[tok] . identifier[token_string] keyword[in] literal[string] :
( identifier[container] , identifier[index] )= identifier[_parse_container] ( identifier[tokens] , identifier[index] )
keyword[if] keyword[not] identifier[container] :
keyword[return] keyword[None]
identifier[parsed_tokens] . identifier[append] ( identifier[container] )
keyword[else] :
identifier[parsed_tokens] . identifier[append] ( identifier[Atom] ( identifier[tok] ))
identifier[index] += literal[int]
keyword[return] identifier[parsed_tokens] | def _parse_tokens(tokens):
"""Parse the tokens.
This converts the tokens into a form where we can manipulate them
more easily.
"""
index = 0
parsed_tokens = []
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
assert tok.token_type != token.INDENT
if tok.token_type == tokenize.NEWLINE:
# There's only one newline and it's at the end.
break # depends on [control=['if'], data=[]]
if tok.token_string in '([{':
(container, index) = _parse_container(tokens, index)
if not container:
return None # depends on [control=['if'], data=[]]
parsed_tokens.append(container) # depends on [control=['if'], data=[]]
else:
parsed_tokens.append(Atom(tok))
index += 1 # depends on [control=['while'], data=['index']]
return parsed_tokens |
def get_issuer(self):
    """
    Return the issuer of this certificate.
    This creates a new :class:`X509Name` that wraps the underlying issuer
    name field on the certificate.  Modifying it will modify the underlying
    certificate, and will have the effect of modifying any other
    :class:`X509Name` that refers to this issuer.
    :return: The issuer of this certificate.
    :rtype: :class:`X509Name`
    """
    issuer = self._get_name(_lib.X509_get_issuer_name)
    # Track the new wrapper with the issuer invalidator.
    self._issuer_invalidator.add(issuer)
    return issuer
constant[
Return the issuer of this certificate.
This creates a new :class:`X509Name` that wraps the underlying issuer
name field on the certificate. Modifying it will modify the underlying
certificate, and will have the effect of modifying any other
:class:`X509Name` that refers to this issuer.
:return: The issuer of this certificate.
:rtype: :class:`X509Name`
]
variable[name] assign[=] call[name[self]._get_name, parameter[name[_lib].X509_get_issuer_name]]
call[name[self]._issuer_invalidator.add, parameter[name[name]]]
return[name[name]] | keyword[def] identifier[get_issuer] ( identifier[self] ):
literal[string]
identifier[name] = identifier[self] . identifier[_get_name] ( identifier[_lib] . identifier[X509_get_issuer_name] )
identifier[self] . identifier[_issuer_invalidator] . identifier[add] ( identifier[name] )
keyword[return] identifier[name] | def get_issuer(self):
"""
Return the issuer of this certificate.
This creates a new :class:`X509Name` that wraps the underlying issuer
name field on the certificate. Modifying it will modify the underlying
certificate, and will have the effect of modifying any other
:class:`X509Name` that refers to this issuer.
:return: The issuer of this certificate.
:rtype: :class:`X509Name`
"""
name = self._get_name(_lib.X509_get_issuer_name)
self._issuer_invalidator.add(name)
return name |
def source(self, request):
    """Pulls values off the request in the provided location
    :param request: The flask request object to parse arguments from
    """
    if not isinstance(self.location, six.string_types):
        # Multiple locations: merge every non-None result into one dict.
        combined = MultiDict()
        for loc in self.location:
            found = getattr(request, loc, None)
            if callable(found):
                found = found()
            if found is not None:
                combined.update(found)
        return combined
    # Single location: return it directly when present.
    value = getattr(request, self.location, MultiDict())
    if callable(value):
        value = value()
    if value is not None:
        return value
    return MultiDict()
constant[Pulls values off the request in the provided location
:param request: The flask request object to parse arguments from
]
if call[name[isinstance], parameter[name[self].location, name[six].string_types]] begin[:]
variable[value] assign[=] call[name[getattr], parameter[name[request], name[self].location, call[name[MultiDict], parameter[]]]]
if call[name[callable], parameter[name[value]]] begin[:]
variable[value] assign[=] call[name[value], parameter[]]
if compare[name[value] is_not constant[None]] begin[:]
return[name[value]]
return[call[name[MultiDict], parameter[]]] | keyword[def] identifier[source] ( identifier[self] , identifier[request] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[location] , identifier[six] . identifier[string_types] ):
identifier[value] = identifier[getattr] ( identifier[request] , identifier[self] . identifier[location] , identifier[MultiDict] ())
keyword[if] identifier[callable] ( identifier[value] ):
identifier[value] = identifier[value] ()
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[value]
keyword[else] :
identifier[values] = identifier[MultiDict] ()
keyword[for] identifier[l] keyword[in] identifier[self] . identifier[location] :
identifier[value] = identifier[getattr] ( identifier[request] , identifier[l] , keyword[None] )
keyword[if] identifier[callable] ( identifier[value] ):
identifier[value] = identifier[value] ()
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[values] . identifier[update] ( identifier[value] )
keyword[return] identifier[values]
keyword[return] identifier[MultiDict] () | def source(self, request):
"""Pulls values off the request in the provided location
:param request: The flask request object to parse arguments from
"""
if isinstance(self.location, six.string_types):
value = getattr(request, self.location, MultiDict())
if callable(value):
value = value() # depends on [control=['if'], data=[]]
if value is not None:
return value # depends on [control=['if'], data=['value']] # depends on [control=['if'], data=[]]
else:
values = MultiDict()
for l in self.location:
value = getattr(request, l, None)
if callable(value):
value = value() # depends on [control=['if'], data=[]]
if value is not None:
values.update(value) # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=['l']]
return values
return MultiDict() |
def get_disks(vm_):
    '''
    Return the disks of a named vm
    CLI Example:
    .. code-block:: bash
        salt '*' virt.get_disks <vm name>
    '''
    with _get_xapi_session() as xapi:
        vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
        if vm_uuid is False:
            return False
        disks = {}
        for vbd in xapi.VM.get_VBDs(vm_uuid):
            device = xapi.VBD.get_device(vbd)
            # Skip VBDs that expose no device name (e.g. empty drives).
            if not device:
                continue
            props = xapi.VBD.get_runtime_properties(vbd)
            disks[device] = {
                'backend': props['backend'],
                'type': props['device-type'],
                'protocol': props['protocol'],
            }
        return disks
constant[
Return the disks of a named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_disks <vm name>
]
with call[name[_get_xapi_session], parameter[]] begin[:]
variable[disk] assign[=] dictionary[[], []]
variable[vm_uuid] assign[=] call[name[_get_label_uuid], parameter[name[xapi], constant[VM], name[vm_]]]
if compare[name[vm_uuid] is constant[False]] begin[:]
return[constant[False]]
for taget[name[vbd]] in starred[call[name[xapi].VM.get_VBDs, parameter[name[vm_uuid]]]] begin[:]
variable[dev] assign[=] call[name[xapi].VBD.get_device, parameter[name[vbd]]]
if <ast.UnaryOp object at 0x7da1b21f9960> begin[:]
continue
variable[prop] assign[=] call[name[xapi].VBD.get_runtime_properties, parameter[name[vbd]]]
call[name[disk]][name[dev]] assign[=] dictionary[[<ast.Constant object at 0x7da1b21f9cc0>, <ast.Constant object at 0x7da1b21f8d00>, <ast.Constant object at 0x7da1b21f8250>], [<ast.Subscript object at 0x7da1b21f89a0>, <ast.Subscript object at 0x7da1b1c14520>, <ast.Subscript object at 0x7da1b1c15510>]]
return[name[disk]] | keyword[def] identifier[get_disks] ( identifier[vm_] ):
literal[string]
keyword[with] identifier[_get_xapi_session] () keyword[as] identifier[xapi] :
identifier[disk] ={}
identifier[vm_uuid] = identifier[_get_label_uuid] ( identifier[xapi] , literal[string] , identifier[vm_] )
keyword[if] identifier[vm_uuid] keyword[is] keyword[False] :
keyword[return] keyword[False]
keyword[for] identifier[vbd] keyword[in] identifier[xapi] . identifier[VM] . identifier[get_VBDs] ( identifier[vm_uuid] ):
identifier[dev] = identifier[xapi] . identifier[VBD] . identifier[get_device] ( identifier[vbd] )
keyword[if] keyword[not] identifier[dev] :
keyword[continue]
identifier[prop] = identifier[xapi] . identifier[VBD] . identifier[get_runtime_properties] ( identifier[vbd] )
identifier[disk] [ identifier[dev] ]={
literal[string] : identifier[prop] [ literal[string] ],
literal[string] : identifier[prop] [ literal[string] ],
literal[string] : identifier[prop] [ literal[string] ]
}
keyword[return] identifier[disk] | def get_disks(vm_):
"""
Return the disks of a named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_disks <vm name>
"""
with _get_xapi_session() as xapi:
disk = {}
vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
if vm_uuid is False:
return False # depends on [control=['if'], data=[]]
for vbd in xapi.VM.get_VBDs(vm_uuid):
dev = xapi.VBD.get_device(vbd)
if not dev:
continue # depends on [control=['if'], data=[]]
prop = xapi.VBD.get_runtime_properties(vbd)
disk[dev] = {'backend': prop['backend'], 'type': prop['device-type'], 'protocol': prop['protocol']} # depends on [control=['for'], data=['vbd']]
return disk # depends on [control=['with'], data=['xapi']] |
def parse_phone(parts, allow_multiple=False):
    """
    Parse the phone number from the ad's parts
    parts -> The backpage ad's posting_body, separated into substrings
    allow_multiple -> If false, arbitrarily chooses the most commonly occurring phone
    Returns a tuple ``(phone, return_parts)`` where ``phone`` is a single
    digit string (or a list of them when ``allow_multiple`` is True) and
    ``return_parts`` is ``parts`` with occurrences of the found phone
    numbers stripped out.
    """
    # Get text substitutions (ex: 'three' -> '3')
    text_subs = misc.phone_text_subs()
    Small = text_subs['Small']
    Magnitude = text_subs['Magnitude']
    Others = text_subs['Others']
    phone_pattern = r'1?(?:[2-9][0-8][0-9])\s?(?:[2-9][0-9]{2})\s?(?:[0-9]{2})\s?(?:[0-9]{2})'
    phone_pattern_spaces = r'1?\W?[2-9]\W?[0-8]\W?[0-9]\W?[2-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]'
    found_phones = []
    return_parts = []
    # Check each part for phone # and remove from parts if found
    for part in parts:
        body = part
        # remove '420' references to avoid false positives
        body = re.sub(r'420 ?friendly', '', body)
        body = body.replace(' 420 ', '')
        body = body.replace('420 sp', '')
        # Replace all disguising characters in the body
        # (e.g. spelled-out digits, unicode look-alikes).
        for key in Small:
            body = re.sub(r'-?' + re.escape(key) + r'-?', str(Small[key]), body)
        for key in Magnitude:
            body = re.sub(r'-?' + re.escape(key) + r'-?', str(Magnitude[key]), body)
        for key in Others:
            body = re.sub(r'-?' + re.escape(key) + r'-?', str(Others[key]), body)
        body = re.sub(r'\W', ' ', body)
        body = re.sub(r' +', ' ', body)
        if len(re.sub(r'\D', '', body)) < 10:
            # Less than 10 numeric digits in part - no phone number here
            return_parts.append(part)
            continue
        # Try the standard format first; fall back to digits separated
        # by arbitrary non-word characters.
        phones = re.findall(phone_pattern, body)
        if not phones:
            phones = re.findall(phone_pattern_spaces, body)
        for found in phones:
            found_phones.append(re.sub(r'\D', '', found))
        if found_phones:
            # Phones have been found: strip every known phone from this
            # part.  BUGFIX: previously each found phone produced its own
            # copy of the part (with only that one phone removed), so a
            # part could be appended multiple times; now the removals are
            # chained and the part is appended at most once.
            filtered_part = part
            for found in found_phones:
                filtered_part = parser_helpers.remove_phone(filtered_part, found)
            if re.sub(r'\W', '', filtered_part):
                # get rid of now-empty parts
                return_parts.append(filtered_part)
        else:
            # Phone not found yet, add part to output
            return_parts.append(part)
    if not allow_multiple:
        # Get most commonly occurring phone
        found_phone = ''
        if found_phones:
            found_phone = max(set(found_phones), key=found_phones.count)
        # Return the phone along with the original parts (minus any occurrences of the phone number)
        return (found_phone, return_parts)
    else:
        # return all phones
        return (list(set(found_phones)), return_parts)
constant[
Parse the phone number from the ad's parts
parts -> The backpage ad's posting_body, separated into substrings
allow_multiple -> If false, arbitrarily chooses the most commonly occurring phone
]
variable[text_subs] assign[=] call[name[misc].phone_text_subs, parameter[]]
variable[Small] assign[=] call[name[text_subs]][constant[Small]]
variable[Magnitude] assign[=] call[name[text_subs]][constant[Magnitude]]
variable[Others] assign[=] call[name[text_subs]][constant[Others]]
variable[phone_pattern] assign[=] constant[1?(?:[2-9][0-8][0-9])\s?(?:[2-9][0-9]{2})\s?(?:[0-9]{2})\s?(?:[0-9]{2})]
variable[phone_pattern_spaces] assign[=] constant[1?\W?[2-9]\W?[0-8]\W?[0-9]\W?[2-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]]
variable[found_phones] assign[=] list[[]]
variable[return_parts] assign[=] list[[]]
for taget[name[part]] in starred[name[parts]] begin[:]
variable[body] assign[=] name[part]
variable[body] assign[=] call[name[re].sub, parameter[constant[420 ?friendly], constant[], name[body]]]
variable[body] assign[=] call[name[body].replace, parameter[constant[ 420 ], constant[]]]
variable[body] assign[=] call[name[body].replace, parameter[constant[420 sp], constant[]]]
for taget[name[key]] in starred[name[Small]] begin[:]
variable[body] assign[=] call[name[re].sub, parameter[binary_operation[binary_operation[constant[-?] + call[name[re].escape, parameter[name[key]]]] + constant[-?]], call[name[str], parameter[call[name[Small]][name[key]]]], name[body]]]
for taget[name[key]] in starred[name[Magnitude]] begin[:]
variable[body] assign[=] call[name[re].sub, parameter[binary_operation[binary_operation[constant[-?] + call[name[re].escape, parameter[name[key]]]] + constant[-?]], call[name[str], parameter[call[name[Magnitude]][name[key]]]], name[body]]]
for taget[name[key]] in starred[name[Others]] begin[:]
variable[body] assign[=] call[name[re].sub, parameter[binary_operation[binary_operation[constant[-?] + call[name[re].escape, parameter[name[key]]]] + constant[-?]], call[name[str], parameter[call[name[Others]][name[key]]]], name[body]]]
variable[body] assign[=] call[name[re].sub, parameter[constant[\W], constant[ ], name[body]]]
variable[body] assign[=] call[name[re].sub, parameter[constant[ +], constant[ ], name[body]]]
if compare[call[name[len], parameter[call[name[re].sub, parameter[constant[\D], constant[], name[body]]]]] less[<] constant[10]] begin[:]
call[name[return_parts].append, parameter[name[part]]]
continue
variable[phones] assign[=] call[name[re].findall, parameter[name[phone_pattern], name[body]]]
if compare[call[name[len], parameter[name[phones]]] equal[==] constant[0]] begin[:]
variable[phones] assign[=] call[name[re].findall, parameter[name[phone_pattern_spaces], name[body]]]
if compare[call[name[len], parameter[name[phones]]] greater[>] constant[0]] begin[:]
for taget[name[found]] in starred[name[phones]] begin[:]
call[name[found_phones].append, parameter[call[name[re].sub, parameter[constant[\D], constant[], name[found]]]]]
if name[found_phones] begin[:]
for taget[name[found]] in starred[name[found_phones]] begin[:]
variable[filtered_part] assign[=] call[name[parser_helpers].remove_phone, parameter[name[part], name[found]]]
if call[name[re].sub, parameter[constant[\W], constant[], name[filtered_part]]] begin[:]
call[name[return_parts].append, parameter[name[filtered_part]]]
if <ast.UnaryOp object at 0x7da1b0ca4070> begin[:]
variable[found_phone] assign[=] constant[]
if compare[call[name[len], parameter[name[found_phones]]] greater[>] constant[0]] begin[:]
variable[found_phone] assign[=] call[name[max], parameter[call[name[set], parameter[name[found_phones]]]]]
return[tuple[[<ast.Name object at 0x7da1b0b7ebf0>, <ast.Name object at 0x7da1b0b7fdf0>]]] | keyword[def] identifier[parse_phone] ( identifier[parts] , identifier[allow_multiple] = keyword[False] ):
literal[string]
identifier[text_subs] = identifier[misc] . identifier[phone_text_subs] ()
identifier[Small] = identifier[text_subs] [ literal[string] ]
identifier[Magnitude] = identifier[text_subs] [ literal[string] ]
identifier[Others] = identifier[text_subs] [ literal[string] ]
identifier[phone_pattern] = literal[string]
identifier[phone_pattern_spaces] = literal[string]
identifier[found_phones] =[]
identifier[return_parts] =[]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[body] = identifier[part]
identifier[body] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[body] )
identifier[body] = identifier[body] . identifier[replace] ( literal[string] , literal[string] )
identifier[body] = identifier[body] . identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[key] keyword[in] identifier[Small] :
identifier[body] = identifier[re] . identifier[sub] ( literal[string] + identifier[re] . identifier[escape] ( identifier[key] )+ literal[string] , identifier[str] ( identifier[Small] [ identifier[key] ]), identifier[body] )
keyword[for] identifier[key] keyword[in] identifier[Magnitude] :
identifier[body] = identifier[re] . identifier[sub] ( literal[string] + identifier[re] . identifier[escape] ( identifier[key] )+ literal[string] , identifier[str] ( identifier[Magnitude] [ identifier[key] ]), identifier[body] )
keyword[for] identifier[key] keyword[in] identifier[Others] :
identifier[body] = identifier[re] . identifier[sub] ( literal[string] + identifier[re] . identifier[escape] ( identifier[key] )+ literal[string] , identifier[str] ( identifier[Others] [ identifier[key] ]), identifier[body] )
identifier[body] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[body] )
identifier[body] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[body] )
keyword[if] identifier[len] ( identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[body] ))< literal[int] :
identifier[return_parts] . identifier[append] ( identifier[part] )
keyword[continue] ;
identifier[phones] = identifier[re] . identifier[findall] ( identifier[phone_pattern] , identifier[body] )
keyword[if] identifier[len] ( identifier[phones] )== literal[int] :
identifier[phones] = identifier[re] . identifier[findall] ( identifier[phone_pattern_spaces] , identifier[body] )
keyword[if] identifier[len] ( identifier[phones] )> literal[int] :
keyword[for] identifier[found] keyword[in] identifier[phones] :
identifier[found_phones] . identifier[append] ( identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[found] ))
keyword[else] :
keyword[for] identifier[found] keyword[in] identifier[phones] :
identifier[found_phones] . identifier[append] ( identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[found] ))
keyword[if] identifier[found_phones] :
keyword[for] identifier[found] keyword[in] identifier[found_phones] :
identifier[filtered_part] = identifier[parser_helpers] . identifier[remove_phone] ( identifier[part] , identifier[found] )
keyword[if] identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[filtered_part] ):
identifier[return_parts] . identifier[append] ( identifier[filtered_part] )
keyword[else] :
identifier[return_parts] . identifier[append] ( identifier[part] )
keyword[if] keyword[not] identifier[allow_multiple] :
identifier[found_phone] = literal[string]
keyword[if] identifier[len] ( identifier[found_phones] )> literal[int] :
identifier[found_phone] = identifier[max] ( identifier[set] ( identifier[found_phones] ), identifier[key] = identifier[found_phones] . identifier[count] )
keyword[return] ( identifier[found_phone] , identifier[return_parts] )
keyword[else] :
keyword[return] ( identifier[list] ( identifier[set] ( identifier[found_phones] )), identifier[return_parts] ) | def parse_phone(parts, allow_multiple=False):
"""
Parse the phone number from the ad's parts
parts -> The backpage ad's posting_body, separated into substrings
allow_multiple -> If false, arbitrarily chooses the most commonly occurring phone
"""
# Get text substitutions (ex: 'three' -> '3')
text_subs = misc.phone_text_subs()
Small = text_subs['Small']
Magnitude = text_subs['Magnitude']
Others = text_subs['Others']
phone_pattern = '1?(?:[2-9][0-8][0-9])\\s?(?:[2-9][0-9]{2})\\s?(?:[0-9]{2})\\s?(?:[0-9]{2})'
phone_pattern_spaces = '1?\\W?[2-9]\\W?[0-8]\\W?[0-9]\\W?[2-9]\\W?[0-9]\\W?[0-9]\\W?[0-9]\\W?[0-9]\\W?[0-9]\\W?[0-9]'
found_phones = []
return_parts = []
# Check each part for phone # and remove from parts if found
for part in parts:
body = part
# remove '420' references to avoid false positives
body = re.sub('420 ?friendly', '', body)
body = body.replace(' 420 ', '')
body = body.replace('420 sp', '')
# Replace all disguising characters in the body
for key in Small:
body = re.sub('-?' + re.escape(key) + '-?', str(Small[key]), body) # depends on [control=['for'], data=['key']]
for key in Magnitude:
body = re.sub('-?' + re.escape(key) + '-?', str(Magnitude[key]), body) # depends on [control=['for'], data=['key']]
for key in Others:
body = re.sub('-?' + re.escape(key) + '-?', str(Others[key]), body) # depends on [control=['for'], data=['key']]
body = re.sub('\\W', ' ', body)
body = re.sub(' +', ' ', body)
if len(re.sub('\\D', '', body)) < 10:
# Less than 10 numeric digits in part - no phone number here
return_parts.append(part)
continue # depends on [control=['if'], data=[]]
phones = re.findall(phone_pattern, body)
if len(phones) == 0:
# No phone number in standard format
phones = re.findall(phone_pattern_spaces, body)
if len(phones) > 0:
# Phone number had spaces between digits
for found in phones:
found_phones.append(re.sub('\\D', '', found)) # depends on [control=['for'], data=['found']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Found phone in standard format
for found in phones:
found_phones.append(re.sub('\\D', '', found)) # depends on [control=['for'], data=['found']]
if found_phones:
# Phone has been found, remove from part)
for found in found_phones:
filtered_part = parser_helpers.remove_phone(part, found) # depends on [control=['for'], data=['found']]
if re.sub('\\W', '', filtered_part):
# get rid of now-empty parts
return_parts.append(filtered_part) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Phone not found yet, add part to output
return_parts.append(part) # depends on [control=['for'], data=['part']]
if not allow_multiple:
# Get most commonly occurring phone
found_phone = ''
if len(found_phones) > 0:
found_phone = max(set(found_phones), key=found_phones.count) # depends on [control=['if'], data=[]]
# Return the phone along with the original parts (minus any occurrences of the phone number)
return (found_phone, return_parts) # depends on [control=['if'], data=[]]
else:
# return all phones
return (list(set(found_phones)), return_parts) |
def merge_table_records(self, table, record_data, match_column_names):
    """Invoke the Responsys ``mergeTableRecords`` SOAP call.

    Accepts:
        InteractObject table
        RecordData record_data
        list match_column_names
    Returns a MergeResult wrapping the service response.
    """
    # Convert both wrapper objects to their SOAP representations first.
    soap_table = table.get_soap_object(self.client)
    soap_records = record_data.get_soap_object(self.client)
    response = self.call(
        'mergeTableRecords', soap_table, soap_records, match_column_names)
    return MergeResult(response)
constant[ Responsys.mergeTableRecords call
Accepts:
InteractObject table
RecordData record_data
list match_column_names
Returns a MergeResult
]
variable[table] assign[=] call[name[table].get_soap_object, parameter[name[self].client]]
variable[record_data] assign[=] call[name[record_data].get_soap_object, parameter[name[self].client]]
return[call[name[MergeResult], parameter[call[name[self].call, parameter[constant[mergeTableRecords], name[table], name[record_data], name[match_column_names]]]]]] | keyword[def] identifier[merge_table_records] ( identifier[self] , identifier[table] , identifier[record_data] , identifier[match_column_names] ):
literal[string]
identifier[table] = identifier[table] . identifier[get_soap_object] ( identifier[self] . identifier[client] )
identifier[record_data] = identifier[record_data] . identifier[get_soap_object] ( identifier[self] . identifier[client] )
keyword[return] identifier[MergeResult] ( identifier[self] . identifier[call] (
literal[string] , identifier[table] , identifier[record_data] , identifier[match_column_names] )) | def merge_table_records(self, table, record_data, match_column_names):
""" Responsys.mergeTableRecords call
Accepts:
InteractObject table
RecordData record_data
list match_column_names
Returns a MergeResult
"""
table = table.get_soap_object(self.client)
record_data = record_data.get_soap_object(self.client)
return MergeResult(self.call('mergeTableRecords', table, record_data, match_column_names)) |
def volume(self, volume):
    """See `volume`."""
    # Coerce to int and clamp the requested value into the 0-100 range.
    self._volume = min(100, max(0, int(volume)))
constant[See `volume`.]
variable[volume] assign[=] call[name[int], parameter[name[volume]]]
name[self]._volume assign[=] call[name[max], parameter[constant[0], call[name[min], parameter[name[volume], constant[100]]]]] | keyword[def] identifier[volume] ( identifier[self] , identifier[volume] ):
literal[string]
identifier[volume] = identifier[int] ( identifier[volume] )
identifier[self] . identifier[_volume] = identifier[max] ( literal[int] , identifier[min] ( identifier[volume] , literal[int] )) | def volume(self, volume):
"""See `volume`."""
# max 100
volume = int(volume)
self._volume = max(0, min(volume, 100)) |
def get_wordlist(language, word_source):
    """ Takes in a language and a word source and returns a matching wordlist,
        if it exists.

        The wordlist is expected to be defined at module level as a
        comma-separated string named ``<language>_words_<word_source>``
        (e.g. ``english_words_bip39``).

        Valid languages: ['english']
        Valid word sources: ['bip39', 'wiktionary', 'google']

        Raises an Exception if no matching wordlist variable is defined.
    """
    # Look the variable up explicitly instead of eval()-ing a constructed
    # expression: eval on caller-controlled strings allows arbitrary code
    # execution, and the lookup also covers malformed names that would
    # previously have escaped as SyntaxError instead of the intended error.
    wordlist_string = globals().get(language + '_words_' + word_source)
    if wordlist_string is None:
        raise Exception("No wordlist could be found for the word source and language provided.")
    wordlist = wordlist_string.split(',')
    return wordlist
constant[ Takes in a language and a word source and returns a matching wordlist,
if it exists.
Valid languages: ['english']
Valid word sources: ['bip39', 'wiktionary', 'google']
]
<ast.Try object at 0x7da1b103b250>
variable[wordlist] assign[=] call[name[wordlist_string].split, parameter[constant[,]]]
return[name[wordlist]] | keyword[def] identifier[get_wordlist] ( identifier[language] , identifier[word_source] ):
literal[string]
keyword[try] :
identifier[wordlist_string] = identifier[eval] ( identifier[language] + literal[string] + identifier[word_source] )
keyword[except] identifier[NameError] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[wordlist] = identifier[wordlist_string] . identifier[split] ( literal[string] )
keyword[return] identifier[wordlist] | def get_wordlist(language, word_source):
""" Takes in a language and a word source and returns a matching wordlist,
if it exists.
Valid languages: ['english']
Valid word sources: ['bip39', 'wiktionary', 'google']
"""
try:
wordlist_string = eval(language + '_words_' + word_source) # depends on [control=['try'], data=[]]
except NameError:
raise Exception('No wordlist could be found for the word source and language provided.') # depends on [control=['except'], data=[]]
wordlist = wordlist_string.split(',')
return wordlist |
def get(id_, hwid, type_, unit, precision, as_json):
    """Get temperature of a specific sensor"""
    # --id is mutually exclusive with --hwid/--type.
    if id_ and (hwid or type_):
        raise click.BadOptionUsage(
            "If --id is given --hwid and --type are not allowed."
        )

    if not id_:
        sensor = W1ThermSensor(type_, hwid)
    else:
        # --id is a 1-based index into the list of available sensors.
        try:
            sensor = W1ThermSensor.get_available_sensors()[id_ - 1]
        except IndexError:
            raise click.BadOptionUsage(
                "No sensor with id {0} available. "
                "Use the ls command to show all available sensors.".format(id_)
            )

    if precision:
        # persist=False: the precision applies to this reading only.
        sensor.set_precision(precision, persist=False)

    temperature = sensor.get_temperature(unit)

    if not as_json:
        click.echo(
            "Sensor {0} measured temperature: {1} {2}".format(
                click.style(sensor.id, bold=True),
                click.style(str(temperature), bold=True),
                click.style(unit, bold=True),
            )
        )
    else:
        payload = {
            "hwid": sensor.id,
            "type": sensor.type_name,
            "temperature": temperature,
            "unit": unit,
        }
        click.echo(json.dumps(payload, indent=4, sort_keys=True))
constant[Get temperature of a specific sensor]
if <ast.BoolOp object at 0x7da1b2294a00> begin[:]
<ast.Raise object at 0x7da1b2296230>
if name[id_] begin[:]
<ast.Try object at 0x7da1b23edc60>
if name[precision] begin[:]
call[name[sensor].set_precision, parameter[name[precision]]]
variable[temperature] assign[=] call[name[sensor].get_temperature, parameter[name[unit]]]
if name[as_json] begin[:]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b2382e90>, <ast.Constant object at 0x7da1b2382a40>, <ast.Constant object at 0x7da1b2380af0>, <ast.Constant object at 0x7da1b23810f0>], [<ast.Attribute object at 0x7da1b2383790>, <ast.Attribute object at 0x7da1b23838b0>, <ast.Name object at 0x7da1b2383820>, <ast.Name object at 0x7da1b2382260>]]
call[name[click].echo, parameter[call[name[json].dumps, parameter[name[data]]]]] | keyword[def] identifier[get] ( identifier[id_] , identifier[hwid] , identifier[type_] , identifier[unit] , identifier[precision] , identifier[as_json] ):
literal[string]
keyword[if] identifier[id_] keyword[and] ( identifier[hwid] keyword[or] identifier[type_] ):
keyword[raise] identifier[click] . identifier[BadOptionUsage] (
literal[string]
)
keyword[if] identifier[id_] :
keyword[try] :
identifier[sensor] = identifier[W1ThermSensor] . identifier[get_available_sensors] ()[ identifier[id_] - literal[int] ]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[click] . identifier[BadOptionUsage] (
literal[string]
literal[string] . identifier[format] ( identifier[id_] )
)
keyword[else] :
identifier[sensor] = identifier[W1ThermSensor] ( identifier[type_] , identifier[hwid] )
keyword[if] identifier[precision] :
identifier[sensor] . identifier[set_precision] ( identifier[precision] , identifier[persist] = keyword[False] )
identifier[temperature] = identifier[sensor] . identifier[get_temperature] ( identifier[unit] )
keyword[if] identifier[as_json] :
identifier[data] ={
literal[string] : identifier[sensor] . identifier[id] ,
literal[string] : identifier[sensor] . identifier[type_name] ,
literal[string] : identifier[temperature] ,
literal[string] : identifier[unit] ,
}
identifier[click] . identifier[echo] ( identifier[json] . identifier[dumps] ( identifier[data] , identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True] ))
keyword[else] :
identifier[click] . identifier[echo] (
literal[string] . identifier[format] (
identifier[click] . identifier[style] ( identifier[sensor] . identifier[id] , identifier[bold] = keyword[True] ),
identifier[click] . identifier[style] ( identifier[str] ( identifier[temperature] ), identifier[bold] = keyword[True] ),
identifier[click] . identifier[style] ( identifier[unit] , identifier[bold] = keyword[True] ),
)
) | def get(id_, hwid, type_, unit, precision, as_json):
"""Get temperature of a specific sensor"""
if id_ and (hwid or type_):
raise click.BadOptionUsage('If --id is given --hwid and --type are not allowed.') # depends on [control=['if'], data=[]]
if id_:
try:
sensor = W1ThermSensor.get_available_sensors()[id_ - 1] # depends on [control=['try'], data=[]]
except IndexError:
raise click.BadOptionUsage('No sensor with id {0} available. Use the ls command to show all available sensors.'.format(id_)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
sensor = W1ThermSensor(type_, hwid)
if precision:
sensor.set_precision(precision, persist=False) # depends on [control=['if'], data=[]]
temperature = sensor.get_temperature(unit)
if as_json:
data = {'hwid': sensor.id, 'type': sensor.type_name, 'temperature': temperature, 'unit': unit}
click.echo(json.dumps(data, indent=4, sort_keys=True)) # depends on [control=['if'], data=[]]
else:
click.echo('Sensor {0} measured temperature: {1} {2}'.format(click.style(sensor.id, bold=True), click.style(str(temperature), bold=True), click.style(unit, bold=True))) |
def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, batch_size=None,
               feed=None,
               args=None):
  """
  A helper function that computes a tensor on numpy inputs by batches.
  This version uses exactly the tensorflow graph constructed by the
  caller, so the caller can place specific ops on specific devices
  to implement model parallelism.
  Most users probably prefer `batch_eval_multi_worker` which maps
  a single-device expression to multiple devices in order to evaluate
  faster by parallelizing across data.

  :param sess: tf Session to use
  :param tf_inputs: list of tf Placeholders to feed from the dataset
  :param tf_outputs: list of tf tensors to calculate
  :param numpy_inputs: list of numpy arrays defining the dataset
  :param batch_size: int, batch size to use for evaluation
      If not specified, this function will try to guess the batch size,
      but might get an out of memory error or run the model with an
      unsupported batch size, etc.
  :param feed: An optional dictionary that is appended to the feeding
           dictionary before the session runs. Can be used to feed
           the learning phase of a Keras model for instance.
  :param args: dict or argparse `Namespace` object.
               Deprecated and included only for backwards compatibility.
               Should contain `batch_size`
  :returns: list of numpy arrays, one per entry of `tf_outputs`, each with
            leading dimension equal to the dataset size.
  """
  if args is not None:
    warnings.warn("`args` is deprecated and will be removed on or "
                  "after 2019-03-09. Pass `batch_size` directly.")
    if "batch_size" in args:
      assert batch_size is None
      batch_size = args["batch_size"]

  if batch_size is None:
    batch_size = DEFAULT_EXAMPLES_PER_DEVICE

  n = len(numpy_inputs)
  assert n > 0
  assert n == len(tf_inputs)
  # All inputs must describe the same number of examples.
  m = numpy_inputs[0].shape[0]
  for i in range(1, n):
    assert numpy_inputs[i].shape[0] == m
  # One accumulator list per requested output tensor.
  out = [[] for _ in tf_outputs]
  # The previous version shadowed the loop variable `start` with a redundant
  # recomputation from `batch`; enumerate yields both values directly.
  for batch, start in enumerate(range(0, m, batch_size)):
    if batch % 100 == 0 and batch > 0:
      _logger.debug("Batch " + str(batch))

    end = start + batch_size
    numpy_input_batches = [numpy_input[start:end]
                           for numpy_input in numpy_inputs]
    # The final batch may be smaller than `batch_size`.
    cur_batch_size = numpy_input_batches[0].shape[0]
    assert cur_batch_size <= batch_size
    for e in numpy_input_batches:
      assert e.shape[0] == cur_batch_size

    feed_dict = dict(zip(tf_inputs, numpy_input_batches))
    if feed is not None:
      feed_dict.update(feed)
    numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict)
    for e in numpy_output_batches:
      assert e.shape[0] == cur_batch_size, e.shape
    for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
      out_elem.append(numpy_output_batch)

  out = [np.concatenate(x, axis=0) for x in out]
  for e in out:
    assert e.shape[0] == m, e.shape
  return out
constant[
A helper function that computes a tensor on numpy inputs by batches.
This version uses exactly the tensorflow graph constructed by the
caller, so the caller can place specific ops on specific devices
to implement model parallelism.
Most users probably prefer `batch_eval_multi_worker` which maps
a single-device expression to multiple devices in order to evaluate
faster by parallelizing across data.
:param sess: tf Session to use
:param tf_inputs: list of tf Placeholders to feed from the dataset
:param tf_outputs: list of tf tensors to calculate
:param numpy_inputs: list of numpy arrays defining the dataset
:param batch_size: int, batch size to use for evaluation
If not specified, this function will try to guess the batch size,
but might get an out of memory error or run the model with an
unsupported batch size, etc.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Deprecated and included only for backwards compatibility.
Should contain `batch_size`
]
if compare[name[args] is_not constant[None]] begin[:]
call[name[warnings].warn, parameter[constant[`args` is deprecated and will be removed on or after 2019-03-09. Pass `batch_size` directly.]]]
if compare[constant[batch_size] in name[args]] begin[:]
assert[compare[name[batch_size] is constant[None]]]
variable[batch_size] assign[=] call[name[args]][constant[batch_size]]
if compare[name[batch_size] is constant[None]] begin[:]
variable[batch_size] assign[=] name[DEFAULT_EXAMPLES_PER_DEVICE]
variable[n] assign[=] call[name[len], parameter[name[numpy_inputs]]]
assert[compare[name[n] greater[>] constant[0]]]
assert[compare[name[n] equal[==] call[name[len], parameter[name[tf_inputs]]]]]
variable[m] assign[=] call[call[name[numpy_inputs]][constant[0]].shape][constant[0]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[n]]]] begin[:]
assert[compare[call[call[name[numpy_inputs]][name[i]].shape][constant[0]] equal[==] name[m]]]
variable[out] assign[=] list[[]]
for taget[name[_]] in starred[name[tf_outputs]] begin[:]
call[name[out].append, parameter[list[[]]]]
for taget[name[start]] in starred[call[name[range], parameter[constant[0], name[m], name[batch_size]]]] begin[:]
variable[batch] assign[=] binary_operation[name[start] <ast.FloorDiv object at 0x7da2590d6bc0> name[batch_size]]
if <ast.BoolOp object at 0x7da207f99780> begin[:]
call[name[_logger].debug, parameter[binary_operation[constant[Batch ] + call[name[str], parameter[name[batch]]]]]]
variable[start] assign[=] binary_operation[name[batch] * name[batch_size]]
variable[end] assign[=] binary_operation[name[start] + name[batch_size]]
variable[numpy_input_batches] assign[=] <ast.ListComp object at 0x7da204961c90>
variable[cur_batch_size] assign[=] call[call[name[numpy_input_batches]][constant[0]].shape][constant[0]]
assert[compare[name[cur_batch_size] less_or_equal[<=] name[batch_size]]]
for taget[name[e]] in starred[name[numpy_input_batches]] begin[:]
assert[compare[call[name[e].shape][constant[0]] equal[==] name[cur_batch_size]]]
variable[feed_dict] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[tf_inputs], name[numpy_input_batches]]]]]
if compare[name[feed] is_not constant[None]] begin[:]
call[name[feed_dict].update, parameter[name[feed]]]
variable[numpy_output_batches] assign[=] call[name[sess].run, parameter[name[tf_outputs]]]
for taget[name[e]] in starred[name[numpy_output_batches]] begin[:]
assert[compare[call[name[e].shape][constant[0]] equal[==] name[cur_batch_size]]]
for taget[tuple[[<ast.Name object at 0x7da2049603d0>, <ast.Name object at 0x7da204961c60>]]] in starred[call[name[zip], parameter[name[out], name[numpy_output_batches]]]] begin[:]
call[name[out_elem].append, parameter[name[numpy_output_batch]]]
variable[out] assign[=] <ast.ListComp object at 0x7da2049634c0>
for taget[name[e]] in starred[name[out]] begin[:]
assert[compare[call[name[e].shape][constant[0]] equal[==] name[m]]]
return[name[out]] | keyword[def] identifier[batch_eval] ( identifier[sess] , identifier[tf_inputs] , identifier[tf_outputs] , identifier[numpy_inputs] , identifier[batch_size] = keyword[None] ,
identifier[feed] = keyword[None] ,
identifier[args] = keyword[None] ):
literal[string]
keyword[if] identifier[args] keyword[is] keyword[not] keyword[None] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] )
keyword[if] literal[string] keyword[in] identifier[args] :
keyword[assert] identifier[batch_size] keyword[is] keyword[None]
identifier[batch_size] = identifier[args] [ literal[string] ]
keyword[if] identifier[batch_size] keyword[is] keyword[None] :
identifier[batch_size] = identifier[DEFAULT_EXAMPLES_PER_DEVICE]
identifier[n] = identifier[len] ( identifier[numpy_inputs] )
keyword[assert] identifier[n] > literal[int]
keyword[assert] identifier[n] == identifier[len] ( identifier[tf_inputs] )
identifier[m] = identifier[numpy_inputs] [ literal[int] ]. identifier[shape] [ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] ):
keyword[assert] identifier[numpy_inputs] [ identifier[i] ]. identifier[shape] [ literal[int] ]== identifier[m]
identifier[out] =[]
keyword[for] identifier[_] keyword[in] identifier[tf_outputs] :
identifier[out] . identifier[append] ([])
keyword[for] identifier[start] keyword[in] identifier[range] ( literal[int] , identifier[m] , identifier[batch_size] ):
identifier[batch] = identifier[start] // identifier[batch_size]
keyword[if] identifier[batch] % literal[int] == literal[int] keyword[and] identifier[batch] > literal[int] :
identifier[_logger] . identifier[debug] ( literal[string] + identifier[str] ( identifier[batch] ))
identifier[start] = identifier[batch] * identifier[batch_size]
identifier[end] = identifier[start] + identifier[batch_size]
identifier[numpy_input_batches] =[ identifier[numpy_input] [ identifier[start] : identifier[end] ]
keyword[for] identifier[numpy_input] keyword[in] identifier[numpy_inputs] ]
identifier[cur_batch_size] = identifier[numpy_input_batches] [ literal[int] ]. identifier[shape] [ literal[int] ]
keyword[assert] identifier[cur_batch_size] <= identifier[batch_size]
keyword[for] identifier[e] keyword[in] identifier[numpy_input_batches] :
keyword[assert] identifier[e] . identifier[shape] [ literal[int] ]== identifier[cur_batch_size]
identifier[feed_dict] = identifier[dict] ( identifier[zip] ( identifier[tf_inputs] , identifier[numpy_input_batches] ))
keyword[if] identifier[feed] keyword[is] keyword[not] keyword[None] :
identifier[feed_dict] . identifier[update] ( identifier[feed] )
identifier[numpy_output_batches] = identifier[sess] . identifier[run] ( identifier[tf_outputs] , identifier[feed_dict] = identifier[feed_dict] )
keyword[for] identifier[e] keyword[in] identifier[numpy_output_batches] :
keyword[assert] identifier[e] . identifier[shape] [ literal[int] ]== identifier[cur_batch_size] , identifier[e] . identifier[shape]
keyword[for] identifier[out_elem] , identifier[numpy_output_batch] keyword[in] identifier[zip] ( identifier[out] , identifier[numpy_output_batches] ):
identifier[out_elem] . identifier[append] ( identifier[numpy_output_batch] )
identifier[out] =[ identifier[np] . identifier[concatenate] ( identifier[x] , identifier[axis] = literal[int] ) keyword[for] identifier[x] keyword[in] identifier[out] ]
keyword[for] identifier[e] keyword[in] identifier[out] :
keyword[assert] identifier[e] . identifier[shape] [ literal[int] ]== identifier[m] , identifier[e] . identifier[shape]
keyword[return] identifier[out] | def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, batch_size=None, feed=None, args=None):
"""
A helper function that computes a tensor on numpy inputs by batches.
This version uses exactly the tensorflow graph constructed by the
caller, so the caller can place specific ops on specific devices
to implement model parallelism.
Most users probably prefer `batch_eval_multi_worker` which maps
a single-device expression to multiple devices in order to evaluate
faster by parallelizing across data.
:param sess: tf Session to use
:param tf_inputs: list of tf Placeholders to feed from the dataset
:param tf_outputs: list of tf tensors to calculate
:param numpy_inputs: list of numpy arrays defining the dataset
:param batch_size: int, batch size to use for evaluation
If not specified, this function will try to guess the batch size,
but might get an out of memory error or run the model with an
unsupported batch size, etc.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Deprecated and included only for backwards compatibility.
Should contain `batch_size`
"""
if args is not None:
warnings.warn('`args` is deprecated and will be removed on or after 2019-03-09. Pass `batch_size` directly.')
if 'batch_size' in args:
assert batch_size is None
batch_size = args['batch_size'] # depends on [control=['if'], data=['args']] # depends on [control=['if'], data=['args']]
if batch_size is None:
batch_size = DEFAULT_EXAMPLES_PER_DEVICE # depends on [control=['if'], data=['batch_size']]
n = len(numpy_inputs)
assert n > 0
assert n == len(tf_inputs)
m = numpy_inputs[0].shape[0]
for i in range(1, n):
assert numpy_inputs[i].shape[0] == m # depends on [control=['for'], data=['i']]
out = []
for _ in tf_outputs:
out.append([]) # depends on [control=['for'], data=[]]
for start in range(0, m, batch_size):
batch = start // batch_size
if batch % 100 == 0 and batch > 0:
_logger.debug('Batch ' + str(batch)) # depends on [control=['if'], data=[]]
# Compute batch start and end indices
start = batch * batch_size
end = start + batch_size
numpy_input_batches = [numpy_input[start:end] for numpy_input in numpy_inputs]
cur_batch_size = numpy_input_batches[0].shape[0]
assert cur_batch_size <= batch_size
for e in numpy_input_batches:
assert e.shape[0] == cur_batch_size # depends on [control=['for'], data=['e']]
feed_dict = dict(zip(tf_inputs, numpy_input_batches))
if feed is not None:
feed_dict.update(feed) # depends on [control=['if'], data=['feed']]
numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict)
for e in numpy_output_batches:
assert e.shape[0] == cur_batch_size, e.shape # depends on [control=['for'], data=['e']]
for (out_elem, numpy_output_batch) in zip(out, numpy_output_batches):
out_elem.append(numpy_output_batch) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['start']]
out = [np.concatenate(x, axis=0) for x in out]
for e in out:
assert e.shape[0] == m, e.shape # depends on [control=['for'], data=['e']]
return out |
def anyword_substring_search_inner(query_word, target_words):
    """ Return `query_word` if ANY target_word starts with it, else False.

        Note the truthy result is the query word itself rather than True,
        so callers can see which query matched.
    """
    matched = any(target.startswith(query_word) for target in target_words)
    return query_word if matched else False
constant[ return True if ANY target_word matches a query_word
]
for taget[name[target_word]] in starred[name[target_words]] begin[:]
if call[name[target_word].startswith, parameter[name[query_word]]] begin[:]
return[name[query_word]]
return[constant[False]] | keyword[def] identifier[anyword_substring_search_inner] ( identifier[query_word] , identifier[target_words] ):
literal[string]
keyword[for] identifier[target_word] keyword[in] identifier[target_words] :
keyword[if] ( identifier[target_word] . identifier[startswith] ( identifier[query_word] )):
keyword[return] identifier[query_word]
keyword[return] keyword[False] | def anyword_substring_search_inner(query_word, target_words):
""" return True if ANY target_word matches a query_word
"""
for target_word in target_words:
if target_word.startswith(query_word):
return query_word # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['target_word']]
return False |
def plot_isobar(self, P, Tmin=None, Tmax=None, methods_P=None, pts=50,
                only_valid=True):  # pragma: no cover
        r'''Method to create a plot of the property vs temperature at a
        specific pressure according to
        either a specified list of methods, or user methods (if set), or all
        methods. User-selectable number of points, and temperature range. If
        only_valid is set,`test_method_validity_P` will be used to check if
        each condition in the specified range is valid, and
        `test_property_validity` will be used to test the answer, and the
        method is allowed to fail; only the valid points will be plotted.
        Otherwise, the result will be calculated and displayed as-is. This will
        not suceed if the method fails.

        Parameters
        ----------
        P : float
            Pressure for the isobar, [Pa]
        Tmin : float
            Minimum temperature, to begin calculating the property, [K]
        Tmax : float
            Maximum temperature, to stop calculating the property, [K]
        methods_P : list, optional
            List of methods to consider
        pts : int, optional
            A list of points to calculate the property at; if Tmin to Tmax
            covers a wide range of method validities, only a few points may end
            up calculated for a given method so this may need to be large
        only_valid : bool
            If True, only plot successful methods and calculated properties,
            and handle errors; if False, attempt calculation without any
            checking and use methods outside their bounds
        '''
        if not has_matplotlib:
            raise Exception('Optional dependency matplotlib is required for plotting')
        # Fall back to the object's own temperature limits when not provided.
        # (The previous error messages incorrectly said "pressure" here.)
        if Tmin is None:
            if self.Tmin is not None:
                Tmin = self.Tmin
            else:
                raise Exception('Minimum temperature could not be auto-detected; please provide it')
        if Tmax is None:
            if self.Tmax is not None:
                Tmax = self.Tmax
            else:
                raise Exception('Maximum temperature could not be auto-detected; please provide it')
        # methods_P defaults to None rather than a mutable [] default;
        # both falsy values take the same fallback path below.
        if not methods_P:
            if self.user_methods_P:
                methods_P = self.user_methods_P
            else:
                methods_P = self.all_methods_P
        Ts = np.linspace(Tmin, Tmax, pts)
        for method_P in methods_P:
            if only_valid:
                properties, Ts2 = [], []
                for T in Ts:
                    if self.test_method_validity_P(T, P, method_P):
                        # Method is allowed to fail; only valid points are kept.
                        try:
                            p = self.calculate_P(T, P, method_P)
                            if self.test_property_validity(p):
                                properties.append(p)
                                Ts2.append(T)
                        except Exception:
                            pass
                plt.plot(Ts2, properties, label=method_P)
            else:
                properties = [self.calculate_P(T, P, method_P) for T in Ts]
                plt.plot(Ts, properties, label=method_P)
        plt.legend(loc='best')
        plt.ylabel(self.name + ', ' + self.units)
        plt.xlabel('Temperature, K')
        plt.title(self.name + ' of ' + self.CASRN)
        plt.show()
constant[Method to create a plot of the property vs temperature at a
specific pressure according to
either a specified list of methods, or user methods (if set), or all
methods. User-selectable number of points, and temperature range. If
only_valid is set,`test_method_validity_P` will be used to check if
each condition in the specified range is valid, and
`test_property_validity` will be used to test the answer, and the
method is allowed to fail; only the valid points will be plotted.
Otherwise, the result will be calculated and displayed as-is. This will
not suceed if the method fails.
Parameters
----------
P : float
Pressure for the isobar, [Pa]
Tmin : float
Minimum temperature, to begin calculating the property, [K]
Tmax : float
Maximum temperature, to stop calculating the property, [K]
methods_P : list, optional
List of methods to consider
pts : int, optional
A list of points to calculate the property at; if Tmin to Tmax
covers a wide range of method validities, only a few points may end
up calculated for a given method so this may need to be large
only_valid : bool
If True, only plot successful methods and calculated properties,
and handle errors; if False, attempt calculation without any
checking and use methods outside their bounds
]
if <ast.UnaryOp object at 0x7da2046239a0> begin[:]
<ast.Raise object at 0x7da204620130>
if compare[name[Tmin] is constant[None]] begin[:]
if compare[name[self].Tmin is_not constant[None]] begin[:]
variable[Tmin] assign[=] name[self].Tmin
if compare[name[Tmax] is constant[None]] begin[:]
if compare[name[self].Tmax is_not constant[None]] begin[:]
variable[Tmax] assign[=] name[self].Tmax
if <ast.UnaryOp object at 0x7da204621630> begin[:]
if name[self].user_methods_P begin[:]
variable[methods_P] assign[=] name[self].user_methods_P
variable[Ts] assign[=] call[name[np].linspace, parameter[name[Tmin], name[Tmax], name[pts]]]
for taget[name[method_P]] in starred[name[methods_P]] begin[:]
if name[only_valid] begin[:]
<ast.Tuple object at 0x7da204621210> assign[=] tuple[[<ast.List object at 0x7da204622530>, <ast.List object at 0x7da204621000>]]
for taget[name[T]] in starred[name[Ts]] begin[:]
if call[name[self].test_method_validity_P, parameter[name[T], name[P], name[method_P]]] begin[:]
<ast.Try object at 0x7da204621de0>
call[name[plt].plot, parameter[name[Ts2], name[properties]]]
call[name[plt].legend, parameter[]]
call[name[plt].ylabel, parameter[binary_operation[binary_operation[name[self].name + constant[, ]] + name[self].units]]]
call[name[plt].xlabel, parameter[constant[Temperature, K]]]
call[name[plt].title, parameter[binary_operation[binary_operation[name[self].name + constant[ of ]] + name[self].CASRN]]]
call[name[plt].show, parameter[]] | keyword[def] identifier[plot_isobar] ( identifier[self] , identifier[P] , identifier[Tmin] = keyword[None] , identifier[Tmax] = keyword[None] , identifier[methods_P] =[], identifier[pts] = literal[int] ,
identifier[only_valid] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[has_matplotlib] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[Tmin] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[Tmin] keyword[is] keyword[not] keyword[None] :
identifier[Tmin] = identifier[self] . identifier[Tmin]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[Tmax] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[Tmax] keyword[is] keyword[not] keyword[None] :
identifier[Tmax] = identifier[self] . identifier[Tmax]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] keyword[not] identifier[methods_P] :
keyword[if] identifier[self] . identifier[user_methods_P] :
identifier[methods_P] = identifier[self] . identifier[user_methods_P]
keyword[else] :
identifier[methods_P] = identifier[self] . identifier[all_methods_P]
identifier[Ts] = identifier[np] . identifier[linspace] ( identifier[Tmin] , identifier[Tmax] , identifier[pts] )
keyword[for] identifier[method_P] keyword[in] identifier[methods_P] :
keyword[if] identifier[only_valid] :
identifier[properties] , identifier[Ts2] =[],[]
keyword[for] identifier[T] keyword[in] identifier[Ts] :
keyword[if] identifier[self] . identifier[test_method_validity_P] ( identifier[T] , identifier[P] , identifier[method_P] ):
keyword[try] :
identifier[p] = identifier[self] . identifier[calculate_P] ( identifier[T] , identifier[P] , identifier[method_P] )
keyword[if] identifier[self] . identifier[test_property_validity] ( identifier[p] ):
identifier[properties] . identifier[append] ( identifier[p] )
identifier[Ts2] . identifier[append] ( identifier[T] )
keyword[except] :
keyword[pass]
identifier[plt] . identifier[plot] ( identifier[Ts2] , identifier[properties] , identifier[label] = identifier[method_P] )
keyword[else] :
identifier[properties] =[ identifier[self] . identifier[calculate_P] ( identifier[T] , identifier[P] , identifier[method_P] ) keyword[for] identifier[T] keyword[in] identifier[Ts] ]
identifier[plt] . identifier[plot] ( identifier[Ts] , identifier[properties] , identifier[label] = identifier[method_P] )
identifier[plt] . identifier[legend] ( identifier[loc] = literal[string] )
identifier[plt] . identifier[ylabel] ( identifier[self] . identifier[name] + literal[string] + identifier[self] . identifier[units] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[title] ( identifier[self] . identifier[name] + literal[string] + identifier[self] . identifier[CASRN] )
identifier[plt] . identifier[show] () | def plot_isobar(self, P, Tmin=None, Tmax=None, methods_P=[], pts=50, only_valid=True): # pragma: no cover
'Method to create a plot of the property vs temperature at a \n specific pressure according to\n either a specified list of methods, or user methods (if set), or all\n methods. User-selectable number of points, and temperature range. If\n only_valid is set,`test_method_validity_P` will be used to check if \n each condition in the specified range is valid, and\n `test_property_validity` will be used to test the answer, and the\n method is allowed to fail; only the valid points will be plotted.\n Otherwise, the result will be calculated and displayed as-is. This will\n not suceed if the method fails.\n\n Parameters\n ----------\n P : float\n Pressure for the isobar, [Pa]\n Tmin : float\n Minimum temperature, to begin calculating the property, [K]\n Tmax : float\n Maximum temperature, to stop calculating the property, [K]\n methods_P : list, optional\n List of methods to consider\n pts : int, optional\n A list of points to calculate the property at; if Tmin to Tmax\n covers a wide range of method validities, only a few points may end\n up calculated for a given method so this may need to be large\n only_valid : bool\n If True, only plot successful methods and calculated properties,\n and handle errors; if False, attempt calculation without any\n checking and use methods outside their bounds\n '
if not has_matplotlib:
raise Exception('Optional dependency matplotlib is required for plotting') # depends on [control=['if'], data=[]]
if Tmin is None:
if self.Tmin is not None:
Tmin = self.Tmin # depends on [control=['if'], data=[]]
else:
raise Exception('Minimum pressure could not be auto-detected; please provide it') # depends on [control=['if'], data=['Tmin']]
if Tmax is None:
if self.Tmax is not None:
Tmax = self.Tmax # depends on [control=['if'], data=[]]
else:
raise Exception('Maximum pressure could not be auto-detected; please provide it') # depends on [control=['if'], data=['Tmax']]
if not methods_P:
if self.user_methods_P:
methods_P = self.user_methods_P # depends on [control=['if'], data=[]]
else:
methods_P = self.all_methods_P # depends on [control=['if'], data=[]]
Ts = np.linspace(Tmin, Tmax, pts)
for method_P in methods_P:
if only_valid:
(properties, Ts2) = ([], [])
for T in Ts:
if self.test_method_validity_P(T, P, method_P):
try:
p = self.calculate_P(T, P, method_P)
if self.test_property_validity(p):
properties.append(p)
Ts2.append(T) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['T']]
plt.plot(Ts2, properties, label=method_P) # depends on [control=['if'], data=[]]
else:
properties = [self.calculate_P(T, P, method_P) for T in Ts]
plt.plot(Ts, properties, label=method_P) # depends on [control=['for'], data=['method_P']]
plt.legend(loc='best')
plt.ylabel(self.name + ', ' + self.units)
plt.xlabel('Temperature, K')
plt.title(self.name + ' of ' + self.CASRN)
plt.show() |
def _ensure_tuple_or_list(arg_name, tuple_or_list):
    """Ensures an input is a tuple or list.
    This effectively reduces the iterable types allowed to a very short
    whitelist: list and tuple.
    :type arg_name: str
    :param arg_name: Name of argument to use in error message.
    :type tuple_or_list: sequence of str
    :param tuple_or_list: Sequence to be verified.
    :rtype: list of str
    :returns: The ``tuple_or_list`` passed in cast to a ``list``.
    :raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
    """
    # Accept only the two whitelisted sequence types; anything else
    # (including str, which is also iterable) is rejected.
    if isinstance(tuple_or_list, (tuple, list)):
        return list(tuple_or_list)
    raise TypeError(
        "Expected %s to be a tuple or list. "
        "Received %r" % (arg_name, tuple_or_list)
    )
constant[Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
]
if <ast.UnaryOp object at 0x7da207f00d90> begin[:]
<ast.Raise object at 0x7da207f00520>
return[call[name[list], parameter[name[tuple_or_list]]]] | keyword[def] identifier[_ensure_tuple_or_list] ( identifier[arg_name] , identifier[tuple_or_list] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[tuple_or_list] ,( identifier[tuple] , identifier[list] )):
keyword[raise] identifier[TypeError] (
literal[string]
literal[string] %( identifier[arg_name] , identifier[tuple_or_list] )
)
keyword[return] identifier[list] ( identifier[tuple_or_list] ) | def _ensure_tuple_or_list(arg_name, tuple_or_list):
"""Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
"""
if not isinstance(tuple_or_list, (tuple, list)):
raise TypeError('Expected %s to be a tuple or list. Received %r' % (arg_name, tuple_or_list)) # depends on [control=['if'], data=[]]
return list(tuple_or_list) |
def get_job_class(klass_str):
    """
    Return the job class named by the dotted path ``klass_str``
    (e.g. ``"myapp.jobs.MyJob"``).

    Returns ``None`` (after logging an error) when the module cannot be
    imported or does not define the requested attribute.
    """
    module_path, class_name = klass_str.rsplit('.', 1)
    try:
        module = importlib.import_module(module_path)
    except ImportError as exc:
        logger.error("Error importing job module %s: '%s'", module_path, exc)
        return
    try:
        return getattr(module, class_name)
    except AttributeError:
        logger.error("Module '%s' does not define a '%s' class",
                     module_path, class_name)
        return
constant[
Return the job class
]
<ast.Tuple object at 0x7da1b1121d50> assign[=] call[name[klass_str].rsplit, parameter[constant[.], constant[1]]]
<ast.Try object at 0x7da1b1123610>
<ast.Try object at 0x7da1b1121870>
return[name[klass]] | keyword[def] identifier[get_job_class] ( identifier[klass_str] ):
literal[string]
identifier[mod_name] , identifier[klass_name] = identifier[klass_str] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[try] :
identifier[mod] = identifier[importlib] . identifier[import_module] ( identifier[mod_name] )
keyword[except] identifier[ImportError] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] , identifier[mod_name] , identifier[e] )
keyword[return]
keyword[try] :
identifier[klass] = identifier[getattr] ( identifier[mod] , identifier[klass_name] )
keyword[except] identifier[AttributeError] :
identifier[logger] . identifier[error] ( literal[string] , identifier[mod_name] , identifier[klass_name] )
keyword[return]
keyword[return] identifier[klass] | def get_job_class(klass_str):
"""
Return the job class
"""
(mod_name, klass_name) = klass_str.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name) # depends on [control=['try'], data=[]]
except ImportError as e:
logger.error("Error importing job module %s: '%s'", mod_name, e)
return # depends on [control=['except'], data=['e']]
try:
klass = getattr(mod, klass_name) # depends on [control=['try'], data=[]]
except AttributeError:
logger.error("Module '%s' does not define a '%s' class", mod_name, klass_name)
return # depends on [control=['except'], data=[]]
return klass |
def remove_nopairs(in_bam, out_dir, config):
    """Remove any reads without both pairs present in the file.

    First pass counts paired reads per query name; second pass writes
    only reads whose name was seen exactly twice. The result is returned
    queryname-sorted via Picard.
    """
    runner = broad.runner_from_config(config)
    base, ext = os.path.splitext(os.path.basename(in_bam))
    out_bam = os.path.join(out_dir, "{}-safepair{}".format(base, ext))
    if not utils.file_exists(out_bam):
        # Pass 1: tally how many times each paired read name appears.
        pair_counts = collections.defaultdict(int)
        with pysam.Samfile(in_bam, "rb") as reader:
            for aln in reader:
                if aln.is_paired:
                    pair_counts[aln.qname] += 1
        # Pass 2: keep only reads with both mates present (count == 2),
        # writing through a transactional temp file.
        with pysam.Samfile(in_bam, "rb") as reader:
            with file_transaction(out_bam) as tx_out_bam:
                with pysam.Samfile(tx_out_bam, "wb", template=reader) as writer:
                    for aln in reader:
                        if pair_counts[aln.qname] == 2:
                            writer.write(aln)
    return runner.run_fn("picard_sort", out_bam, "queryname")
constant[Remove any reads without both pairs present in the file.
]
variable[runner] assign[=] call[name[broad].runner_from_config, parameter[name[config]]]
variable[out_bam] assign[=] call[name[os].path.join, parameter[name[out_dir], call[constant[{}-safepair{}].format, parameter[<ast.Starred object at 0x7da1b18aaec0>]]]]
if <ast.UnaryOp object at 0x7da1b18a9900> begin[:]
variable[read_counts] assign[=] call[name[collections].defaultdict, parameter[name[int]]]
with call[name[pysam].Samfile, parameter[name[in_bam], constant[rb]]] begin[:]
for taget[name[read]] in starred[name[in_pysam]] begin[:]
if name[read].is_paired begin[:]
<ast.AugAssign object at 0x7da1b19b9a20>
with call[name[pysam].Samfile, parameter[name[in_bam], constant[rb]]] begin[:]
with call[name[file_transaction], parameter[name[out_bam]]] begin[:]
with call[name[pysam].Samfile, parameter[name[tx_out_bam], constant[wb]]] begin[:]
for taget[name[read]] in starred[name[in_pysam]] begin[:]
if compare[call[name[read_counts]][name[read].qname] equal[==] constant[2]] begin[:]
call[name[out_pysam].write, parameter[name[read]]]
return[call[name[runner].run_fn, parameter[constant[picard_sort], name[out_bam], constant[queryname]]]] | keyword[def] identifier[remove_nopairs] ( identifier[in_bam] , identifier[out_dir] , identifier[config] ):
literal[string]
identifier[runner] = identifier[broad] . identifier[runner_from_config] ( identifier[config] )
identifier[out_bam] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] . identifier[format] (* identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[in_bam] ))))
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_bam] ):
identifier[read_counts] = identifier[collections] . identifier[defaultdict] ( identifier[int] )
keyword[with] identifier[pysam] . identifier[Samfile] ( identifier[in_bam] , literal[string] ) keyword[as] identifier[in_pysam] :
keyword[for] identifier[read] keyword[in] identifier[in_pysam] :
keyword[if] identifier[read] . identifier[is_paired] :
identifier[read_counts] [ identifier[read] . identifier[qname] ]+= literal[int]
keyword[with] identifier[pysam] . identifier[Samfile] ( identifier[in_bam] , literal[string] ) keyword[as] identifier[in_pysam] :
keyword[with] identifier[file_transaction] ( identifier[out_bam] ) keyword[as] identifier[tx_out_bam] :
keyword[with] identifier[pysam] . identifier[Samfile] ( identifier[tx_out_bam] , literal[string] , identifier[template] = identifier[in_pysam] ) keyword[as] identifier[out_pysam] :
keyword[for] identifier[read] keyword[in] identifier[in_pysam] :
keyword[if] identifier[read_counts] [ identifier[read] . identifier[qname] ]== literal[int] :
identifier[out_pysam] . identifier[write] ( identifier[read] )
keyword[return] identifier[runner] . identifier[run_fn] ( literal[string] , identifier[out_bam] , literal[string] ) | def remove_nopairs(in_bam, out_dir, config):
"""Remove any reads without both pairs present in the file.
"""
runner = broad.runner_from_config(config)
out_bam = os.path.join(out_dir, '{}-safepair{}'.format(*os.path.splitext(os.path.basename(in_bam))))
if not utils.file_exists(out_bam):
read_counts = collections.defaultdict(int)
with pysam.Samfile(in_bam, 'rb') as in_pysam:
for read in in_pysam:
if read.is_paired:
read_counts[read.qname] += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['read']] # depends on [control=['with'], data=['in_pysam']]
with pysam.Samfile(in_bam, 'rb') as in_pysam:
with file_transaction(out_bam) as tx_out_bam:
with pysam.Samfile(tx_out_bam, 'wb', template=in_pysam) as out_pysam:
for read in in_pysam:
if read_counts[read.qname] == 2:
out_pysam.write(read) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['read']] # depends on [control=['with'], data=['out_pysam']] # depends on [control=['with'], data=['tx_out_bam']] # depends on [control=['with'], data=['in_pysam']] # depends on [control=['if'], data=[]]
return runner.run_fn('picard_sort', out_bam, 'queryname') |
def save_config(self, cmd="save", confirm=False, confirm_response=""):
    """Save the running configuration on a Huawei device.

    Thin wrapper that delegates to the base class ``save_config``,
    supplying ``save`` as the device-appropriate default command.

    :param cmd: Command used to persist the configuration (default ``"save"``).
    :param confirm: Whether the device prompts for confirmation after ``cmd``.
    :param confirm_response: Text to send in reply to that confirmation prompt.
    """
    return super(HuaweiBase, self).save_config(
        cmd=cmd, confirm=confirm, confirm_response=confirm_response
    )
constant[ Save Config for HuaweiSSH]
return[call[call[name[super], parameter[name[HuaweiBase], name[self]]].save_config, parameter[]]] | keyword[def] identifier[save_config] ( identifier[self] , identifier[cmd] = literal[string] , identifier[confirm] = keyword[False] , identifier[confirm_response] = literal[string] ):
literal[string]
keyword[return] identifier[super] ( identifier[HuaweiBase] , identifier[self] ). identifier[save_config] (
identifier[cmd] = identifier[cmd] , identifier[confirm] = identifier[confirm] , identifier[confirm_response] = identifier[confirm_response]
) | def save_config(self, cmd='save', confirm=False, confirm_response=''):
""" Save Config for HuaweiSSH"""
return super(HuaweiBase, self).save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response) |
def check(self):
    """Check for validity.
    :raises ValueError:
      - if not all lines are as long as the :attr:`number of needles
        <AYABInterface.machines.Machine.number_of_needles>`
      - if the contents of the rows are not :attr:`needle positions
        <AYABInterface.machines.Machine.needle_positions>`
    """
    # TODO: reaching through self._machine violates the law of Demeter;
    # ideally the machine (or machine+carriage) would perform this check.
    allowed_positions = self._machine.needle_positions
    needles_per_row = self._machine.number_of_needles
    for row_number, row in enumerate(self._rows):
        # Every row must span the full needle bed.
        if len(row) != needles_per_row:
            raise ValueError(_ROW_LENGTH_ERROR_MESSAGE.format(
                row_number, len(row), needles_per_row))
        # Every entry must be one of the machine's needle positions.
        for needle_number, position in enumerate(row):
            if position not in allowed_positions:
                raise ValueError(_NEEDLE_POSITION_ERROR_MESSAGE.format(
                    row_number, needle_number, repr(position),
                    ", ".join(map(repr, allowed_positions))))
constant[Check for validity.
:raises ValueError:
- if not all lines are as long as the :attr:`number of needles
<AYABInterface.machines.Machine.number_of_needles>`
- if the contents of the rows are not :attr:`needle positions
<AYABInterface.machines.Machine.needle_positions>`
]
variable[expected_positions] assign[=] name[self]._machine.needle_positions
variable[expected_row_length] assign[=] name[self]._machine.number_of_needles
for taget[tuple[[<ast.Name object at 0x7da20c6c7040>, <ast.Name object at 0x7da20c6c6410>]]] in starred[call[name[enumerate], parameter[name[self]._rows]]] begin[:]
if compare[call[name[len], parameter[name[row]]] not_equal[!=] name[expected_row_length]] begin[:]
variable[message] assign[=] call[name[_ROW_LENGTH_ERROR_MESSAGE].format, parameter[name[row_index], call[name[len], parameter[name[row]]], name[expected_row_length]]]
<ast.Raise object at 0x7da20c6c6a70>
for taget[tuple[[<ast.Name object at 0x7da20c6c76d0>, <ast.Name object at 0x7da20c6c64a0>]]] in starred[call[name[enumerate], parameter[name[row]]]] begin[:]
if compare[name[needle_position] <ast.NotIn object at 0x7da2590d7190> name[expected_positions]] begin[:]
variable[message] assign[=] call[name[_NEEDLE_POSITION_ERROR_MESSAGE].format, parameter[name[row_index], name[needle_index], call[name[repr], parameter[name[needle_position]]], call[constant[, ].join, parameter[call[name[map], parameter[name[repr], name[expected_positions]]]]]]]
<ast.Raise object at 0x7da20c6c5bd0> | keyword[def] identifier[check] ( identifier[self] ):
literal[string]
identifier[expected_positions] = identifier[self] . identifier[_machine] . identifier[needle_positions]
identifier[expected_row_length] = identifier[self] . identifier[_machine] . identifier[number_of_needles]
keyword[for] identifier[row_index] , identifier[row] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_rows] ):
keyword[if] identifier[len] ( identifier[row] )!= identifier[expected_row_length] :
identifier[message] = identifier[_ROW_LENGTH_ERROR_MESSAGE] . identifier[format] (
identifier[row_index] , identifier[len] ( identifier[row] ), identifier[expected_row_length] )
keyword[raise] identifier[ValueError] ( identifier[message] )
keyword[for] identifier[needle_index] , identifier[needle_position] keyword[in] identifier[enumerate] ( identifier[row] ):
keyword[if] identifier[needle_position] keyword[not] keyword[in] identifier[expected_positions] :
identifier[message] = identifier[_NEEDLE_POSITION_ERROR_MESSAGE] . identifier[format] (
identifier[row_index] , identifier[needle_index] , identifier[repr] ( identifier[needle_position] ),
literal[string] . identifier[join] ( identifier[map] ( identifier[repr] , identifier[expected_positions] )))
keyword[raise] identifier[ValueError] ( identifier[message] ) | def check(self):
"""Check for validity.
:raises ValueError:
- if not all lines are as long as the :attr:`number of needles
<AYABInterface.machines.Machine.number_of_needles>`
- if the contents of the rows are not :attr:`needle positions
<AYABInterface.machines.Machine.needle_positions>`
"""
# TODO: This violates the law of demeter.
# The architecture should be changed that this check is either
# performed by the machine or by the unity of machine and
# carriage.
expected_positions = self._machine.needle_positions
expected_row_length = self._machine.number_of_needles
for (row_index, row) in enumerate(self._rows):
if len(row) != expected_row_length:
message = _ROW_LENGTH_ERROR_MESSAGE.format(row_index, len(row), expected_row_length)
raise ValueError(message) # depends on [control=['if'], data=['expected_row_length']]
for (needle_index, needle_position) in enumerate(row):
if needle_position not in expected_positions:
message = _NEEDLE_POSITION_ERROR_MESSAGE.format(row_index, needle_index, repr(needle_position), ', '.join(map(repr, expected_positions)))
raise ValueError(message) # depends on [control=['if'], data=['needle_position', 'expected_positions']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] |
def is_valid_isbn(isbn):
    """
    Validate given `isbn`. Wrapper for :func:`is_isbn10_valid`/
    :func:`is_isbn13_valid`.
    Args:
        isbn (str/list): ISBN number as string or list of digits.
    Note:
        Function doesn't require `isbn` type to be specified (it can be both
        10/13 isbn's versions).
    Returns:
        bool: ``True`` if ISBN is valid.
    """
    digit_count = len(isbn)
    # Dispatch on length; delegate to the version-specific validator.
    if digit_count == 10:
        return is_isbn10_valid(isbn)
    if digit_count == 13:
        return is_isbn13_valid(isbn)
    # Any other length cannot be a valid ISBN.
    return False
constant[
Validate given `isbn`. Wrapper for :func:`is_isbn10_valid`/
:func:`is_isbn13_valid`.
Args:
isbn (str/list): ISBN number as string or list of digits.
Note:
Function doesn't require `isbn` type to be specified (it can be both
10/13 isbn's versions).
Returns:
bool: ``True`` if ISBN is valid.
]
variable[length] assign[=] call[name[len], parameter[name[isbn]]]
if compare[name[length] equal[==] constant[10]] begin[:]
return[call[name[is_isbn10_valid], parameter[name[isbn]]]]
return[constant[False]] | keyword[def] identifier[is_valid_isbn] ( identifier[isbn] ):
literal[string]
identifier[length] = identifier[len] ( identifier[isbn] )
keyword[if] identifier[length] == literal[int] :
keyword[return] identifier[is_isbn10_valid] ( identifier[isbn] )
keyword[elif] identifier[length] == literal[int] :
keyword[return] identifier[is_isbn13_valid] ( identifier[isbn] )
keyword[return] keyword[False] | def is_valid_isbn(isbn):
"""
Validate given `isbn`. Wrapper for :func:`is_isbn10_valid`/
:func:`is_isbn13_valid`.
Args:
isbn (str/list): ISBN number as string or list of digits.
Note:
Function doesn't require `isbn` type to be specified (it can be both
10/13 isbn's versions).
Returns:
bool: ``True`` if ISBN is valid.
"""
length = len(isbn)
if length == 10:
return is_isbn10_valid(isbn) # depends on [control=['if'], data=[]]
elif length == 13:
return is_isbn13_valid(isbn) # depends on [control=['if'], data=[]]
return False |
def url(match, handler=None, methods=None, defaults=None,
        redirect_to=None, build_only=False, name=None, **kwargs):
    """Simple helper for build a url, and return anillo
    url spec hash map (dictionary)
    It can be used in this way:
        urls = [
            url("/<int:year>", index, methods=["get", "post"]),
            url("/<int:year>", index, methods=["get", "post"])
        ]
    This is a prefered way to define one url.
    :return: The anillo url spec
    :rtype: dict
    """
    # NOTE(review): these asserts are stripped under ``python -O``.
    assert isinstance(match, str), "match parameter should be string."
    assert handler or redirect_to, "you should specify handler or redirect_to for the url"

    # Normalize HTTP methods to an uppercase list; leave other values
    # (e.g. None) untouched.
    if isinstance(methods, str):
        normalized_methods = [methods.upper()]
    elif isinstance(methods, (list, tuple)):
        normalized_methods = [method.upper() for method in methods]
    else:
        normalized_methods = methods

    return {"match": match,
            "handler": handler,
            "methods": normalized_methods,
            "defaults": defaults,
            "redirect_to": redirect_to,
            "build_only": build_only,
            "name": name,
            "extra_data": kwargs}
constant[Simple helper for build a url, and return anillo
url spec hash map (dictionary)
It can be used in this way:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
]
This is a prefered way to define one url.
:return: The anillo url spec
:rtype: dict
]
assert[call[name[isinstance], parameter[name[match], name[str]]]]
assert[<ast.BoolOp object at 0x7da18fe92a40>]
if call[name[isinstance], parameter[name[methods], name[str]]] begin[:]
variable[methods] assign[=] list[[<ast.Call object at 0x7da18fe90160>]]
variable[rule] assign[=] dictionary[[<ast.Constant object at 0x7da18fe907f0>, <ast.Constant object at 0x7da18fe90f10>, <ast.Constant object at 0x7da18fe91a20>, <ast.Constant object at 0x7da18fe90940>, <ast.Constant object at 0x7da18fe90eb0>, <ast.Constant object at 0x7da18fe930a0>, <ast.Constant object at 0x7da18fe909a0>, <ast.Constant object at 0x7da18fe91900>], [<ast.Name object at 0x7da18fe907c0>, <ast.Name object at 0x7da18fe91180>, <ast.Name object at 0x7da18fe92020>, <ast.Name object at 0x7da18fe913c0>, <ast.Name object at 0x7da18fe92080>, <ast.Name object at 0x7da18fe92920>, <ast.Name object at 0x7da18fe90d30>, <ast.Name object at 0x7da18fe91570>]]
return[name[rule]] | keyword[def] identifier[url] ( identifier[match] , identifier[handler] = keyword[None] , identifier[methods] = keyword[None] , identifier[defaults] = keyword[None] ,
identifier[redirect_to] = keyword[None] , identifier[build_only] = keyword[False] , identifier[name] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[match] , identifier[str] ), literal[string]
keyword[assert] identifier[handler] keyword[or] identifier[redirect_to] , literal[string]
keyword[if] identifier[isinstance] ( identifier[methods] , identifier[str] ):
identifier[methods] =[ identifier[methods] . identifier[upper] ()]
keyword[elif] identifier[isinstance] ( identifier[methods] ,( identifier[list] , identifier[tuple] )):
identifier[methods] =[ identifier[x] . identifier[upper] () keyword[for] identifier[x] keyword[in] identifier[methods] ]
identifier[rule] ={ literal[string] : identifier[match] ,
literal[string] : identifier[handler] ,
literal[string] : identifier[methods] ,
literal[string] : identifier[defaults] ,
literal[string] : identifier[redirect_to] ,
literal[string] : identifier[build_only] ,
literal[string] : identifier[name] ,
literal[string] : identifier[kwargs] }
keyword[return] identifier[rule] | def url(match, handler=None, methods=None, defaults=None, redirect_to=None, build_only=False, name=None, **kwargs):
"""Simple helper for build a url, and return anillo
url spec hash map (dictionary)
It can be used in this way:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
]
This is a prefered way to define one url.
:return: The anillo url spec
:rtype: dict
"""
assert isinstance(match, str), 'match parameter should be string.'
assert handler or redirect_to, 'you should specify handler or redirect_to for the url'
if isinstance(methods, str):
methods = [methods.upper()] # depends on [control=['if'], data=[]]
elif isinstance(methods, (list, tuple)):
methods = [x.upper() for x in methods] # depends on [control=['if'], data=[]]
rule = {'match': match, 'handler': handler, 'methods': methods, 'defaults': defaults, 'redirect_to': redirect_to, 'build_only': build_only, 'name': name, 'extra_data': kwargs}
return rule |
def check_wide_data_for_blank_choices(choice_col, wide_data):
    """
    Checks `wide_data` for null values in the choice column, and raises a
    helpful ValueError if null values are found.
    Parameters
    ----------
    choice_col : str.
        Denotes the column in `wide_data` that is used to record each
        observation's choice.
    wide_data : pandas dataframe.
        Contains one row for each observation. Should contain `choice_col`.
    Returns
    -------
    None.
    """
    # Null choices make the observation unusable, so fail loudly.
    if wide_data[choice_col].isnull().any():
        raise ValueError("One or more of the values in wide_data[choice_col] "
                         "is null. Remove null values in the choice column "
                         "or fill them in.")
    return None
constant[
Checks `wide_data` for null values in the choice column, and raises a
helpful ValueError if null values are found.
Parameters
----------
choice_col : str.
Denotes the column in `wide_data` that is used to record each
observation's choice.
wide_data : pandas dataframe.
Contains one row for each observation. Should contain `choice_col`.
Returns
-------
None.
]
if call[call[call[name[wide_data]][name[choice_col]].isnull, parameter[]].any, parameter[]] begin[:]
variable[msg_1] assign[=] constant[One or more of the values in wide_data[choice_col] is null.]
variable[msg_2] assign[=] constant[ Remove null values in the choice column or fill them in.]
<ast.Raise object at 0x7da1b13041c0>
return[constant[None]] | keyword[def] identifier[check_wide_data_for_blank_choices] ( identifier[choice_col] , identifier[wide_data] ):
literal[string]
keyword[if] identifier[wide_data] [ identifier[choice_col] ]. identifier[isnull] (). identifier[any] ():
identifier[msg_1] = literal[string]
identifier[msg_2] = literal[string]
keyword[raise] identifier[ValueError] ( identifier[msg_1] + identifier[msg_2] )
keyword[return] keyword[None] | def check_wide_data_for_blank_choices(choice_col, wide_data):
"""
Checks `wide_data` for null values in the choice column, and raises a
helpful ValueError if null values are found.
Parameters
----------
choice_col : str.
Denotes the column in `wide_data` that is used to record each
observation's choice.
wide_data : pandas dataframe.
Contains one row for each observation. Should contain `choice_col`.
Returns
-------
None.
"""
if wide_data[choice_col].isnull().any():
msg_1 = 'One or more of the values in wide_data[choice_col] is null.'
msg_2 = ' Remove null values in the choice column or fill them in.'
raise ValueError(msg_1 + msg_2) # depends on [control=['if'], data=[]]
return None |
def _openFile(self):
    """Opens a file dialog and sets a value for the QLineEdit widget.
    This method is also a `SLOT`.
    """
    dialog_filter = "Comma Separated Values (*.csv);;Text files (*.txt);;All Files (*)"
    chosen = QtGui.QFileDialog.getOpenFileName(self,
                                               self.tr('open file'),
                                               filter=dialog_filter)
    # Some bindings (presumably PySide) return a (filename, filter)
    # tuple rather than a plain string; normalize to the filename.
    if isinstance(chosen, tuple):
        chosen = chosen[0]
    # An empty result means the user cancelled the dialog.
    if chosen:
        self._filenameLineEdit.setText(chosen)
        self._updateFilename()
constant[Opens a file dialog and sets a value for the QLineEdit widget.
This method is also a `SLOT`.
]
variable[file_types] assign[=] constant[Comma Separated Values (*.csv);;Text files (*.txt);;All Files (*)]
variable[ret] assign[=] call[name[QtGui].QFileDialog.getOpenFileName, parameter[name[self], call[name[self].tr, parameter[constant[open file]]]]]
if call[name[isinstance], parameter[name[ret], name[tuple]]] begin[:]
variable[ret] assign[=] call[name[ret]][constant[0]]
if name[ret] begin[:]
call[name[self]._filenameLineEdit.setText, parameter[name[ret]]]
call[name[self]._updateFilename, parameter[]] | keyword[def] identifier[_openFile] ( identifier[self] ):
literal[string]
identifier[file_types] = literal[string]
identifier[ret] = identifier[QtGui] . identifier[QFileDialog] . identifier[getOpenFileName] ( identifier[self] ,
identifier[self] . identifier[tr] ( literal[string] ),
identifier[filter] = identifier[file_types] )
keyword[if] identifier[isinstance] ( identifier[ret] , identifier[tuple] ):
identifier[ret] = identifier[ret] [ literal[int] ]
keyword[if] identifier[ret] :
identifier[self] . identifier[_filenameLineEdit] . identifier[setText] ( identifier[ret] )
identifier[self] . identifier[_updateFilename] () | def _openFile(self):
"""Opens a file dialog and sets a value for the QLineEdit widget.
This method is also a `SLOT`.
"""
file_types = 'Comma Separated Values (*.csv);;Text files (*.txt);;All Files (*)'
ret = QtGui.QFileDialog.getOpenFileName(self, self.tr('open file'), filter=file_types)
if isinstance(ret, tuple):
ret = ret[0] #PySide compatibility maybe? # depends on [control=['if'], data=[]]
if ret:
self._filenameLineEdit.setText(ret)
self._updateFilename() # depends on [control=['if'], data=[]] |
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
    """
    Index Map Read from File Method
    """
    self.fileExtension = extension

    # Slurp the whole raster file; the text is kept verbatim on the object.
    with open(path, 'r') as raster_file:
        self.rasterText = raster_file.read()

    # The first six lines are the GRASS ASCII header; extract the
    # georeferencing metadata from them.
    header_lines = self.rasterText.split('\n')[0:6]
    for header_line in header_lines:
        tokens = header_line.split()
        key = tokens[0].lower()
        if 'north' in key:
            self.north = float(tokens[1])
        elif 'south' in key:
            self.south = float(tokens[1])
        elif 'east' in key:
            self.east = float(tokens[1])
        elif 'west' in key:
            self.west = float(tokens[1])
        elif 'rows' in key:
            self.rows = int(tokens[1])
        elif 'cols' in key:
            self.columns = int(tokens[1])

    if spatial:
        # Convert the raster to well known binary via the MapKit
        # RasterLoader so it can be stored spatially.
        self.raster = RasterLoader.grassAsciiRasterToWKB(
            session=session,
            grassRasterPath=path,
            srid=str(spatialReferenceID),
            noData='-1')
        self.srid = spatialReferenceID

    # Assign other properties
    self.filename = filename
constant[
Index Map Read from File Method
]
name[self].fileExtension assign[=] name[extension]
with call[name[open], parameter[name[path], constant[r]]] begin[:]
name[self].rasterText assign[=] call[name[f].read, parameter[]]
variable[lines] assign[=] call[name[self].rasterText.split, parameter[constant[
]]]
for taget[name[line]] in starred[call[name[lines]][<ast.Slice object at 0x7da18f09ec80>]] begin[:]
variable[spline] assign[=] call[name[line].split, parameter[]]
if compare[constant[north] in call[call[name[spline]][constant[0]].lower, parameter[]]] begin[:]
name[self].north assign[=] call[name[float], parameter[call[name[spline]][constant[1]]]]
if name[spatial] begin[:]
variable[wkbRaster] assign[=] call[name[RasterLoader].grassAsciiRasterToWKB, parameter[]]
name[self].raster assign[=] name[wkbRaster]
name[self].srid assign[=] name[spatialReferenceID]
name[self].filename assign[=] name[filename] | keyword[def] identifier[_read] ( identifier[self] , identifier[directory] , identifier[filename] , identifier[session] , identifier[path] , identifier[name] , identifier[extension] , identifier[spatial] , identifier[spatialReferenceID] , identifier[replaceParamFile] ):
literal[string]
identifier[self] . identifier[fileExtension] = identifier[extension]
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[self] . identifier[rasterText] = identifier[f] . identifier[read] ()
identifier[lines] = identifier[self] . identifier[rasterText] . identifier[split] ( literal[string] )
keyword[for] identifier[line] keyword[in] identifier[lines] [ literal[int] : literal[int] ]:
identifier[spline] = identifier[line] . identifier[split] ()
keyword[if] literal[string] keyword[in] identifier[spline] [ literal[int] ]. identifier[lower] ():
identifier[self] . identifier[north] = identifier[float] ( identifier[spline] [ literal[int] ])
keyword[elif] literal[string] keyword[in] identifier[spline] [ literal[int] ]. identifier[lower] ():
identifier[self] . identifier[south] = identifier[float] ( identifier[spline] [ literal[int] ])
keyword[elif] literal[string] keyword[in] identifier[spline] [ literal[int] ]. identifier[lower] ():
identifier[self] . identifier[east] = identifier[float] ( identifier[spline] [ literal[int] ])
keyword[elif] literal[string] keyword[in] identifier[spline] [ literal[int] ]. identifier[lower] ():
identifier[self] . identifier[west] = identifier[float] ( identifier[spline] [ literal[int] ])
keyword[elif] literal[string] keyword[in] identifier[spline] [ literal[int] ]. identifier[lower] ():
identifier[self] . identifier[rows] = identifier[int] ( identifier[spline] [ literal[int] ])
keyword[elif] literal[string] keyword[in] identifier[spline] [ literal[int] ]. identifier[lower] ():
identifier[self] . identifier[columns] = identifier[int] ( identifier[spline] [ literal[int] ])
keyword[if] identifier[spatial] :
identifier[wkbRaster] = identifier[RasterLoader] . identifier[grassAsciiRasterToWKB] ( identifier[session] = identifier[session] ,
identifier[grassRasterPath] = identifier[path] ,
identifier[srid] = identifier[str] ( identifier[spatialReferenceID] ),
identifier[noData] = literal[string] )
identifier[self] . identifier[raster] = identifier[wkbRaster]
identifier[self] . identifier[srid] = identifier[spatialReferenceID]
identifier[self] . identifier[filename] = identifier[filename] | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Index Map Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and read plain text into text field
with open(path, 'r') as f:
self.rasterText = f.read() # depends on [control=['with'], data=['f']]
# Retrieve metadata from header
lines = self.rasterText.split('\n')
for line in lines[0:6]:
spline = line.split()
if 'north' in spline[0].lower():
self.north = float(spline[1]) # depends on [control=['if'], data=[]]
elif 'south' in spline[0].lower():
self.south = float(spline[1]) # depends on [control=['if'], data=[]]
elif 'east' in spline[0].lower():
self.east = float(spline[1]) # depends on [control=['if'], data=[]]
elif 'west' in spline[0].lower():
self.west = float(spline[1]) # depends on [control=['if'], data=[]]
elif 'rows' in spline[0].lower():
self.rows = int(spline[1]) # depends on [control=['if'], data=[]]
elif 'cols' in spline[0].lower():
self.columns = int(spline[1]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if spatial:
# Get well known binary from the raster file using the MapKit RasterLoader
wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session, grassRasterPath=path, srid=str(spatialReferenceID), noData='-1')
self.raster = wkbRaster
self.srid = spatialReferenceID # depends on [control=['if'], data=[]]
# Assign other properties
self.filename = filename |
def run(self):
"""Run the example consumer by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
self._connection = self.connect()
self._connection.ioloop.start() | def function[run, parameter[self]]:
constant[Run the example consumer by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
]
name[self]._connection assign[=] call[name[self].connect, parameter[]]
call[name[self]._connection.ioloop.start, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_connection] = identifier[self] . identifier[connect] ()
identifier[self] . identifier[_connection] . identifier[ioloop] . identifier[start] () | def run(self):
"""Run the example consumer by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
self._connection = self.connect()
self._connection.ioloop.start() |
def findTypeParent(element, tag):
""" Finds fist parent of element of the given type
@param object element: etree element
@param string the tag parent to search for
@return object element: the found parent or None when not found
"""
p = element
while True:
p = p.getparent()
if p.tag == tag:
return p
# Not found
return None | def function[findTypeParent, parameter[element, tag]]:
constant[ Finds fist parent of element of the given type
@param object element: etree element
@param string the tag parent to search for
@return object element: the found parent or None when not found
]
variable[p] assign[=] name[element]
while constant[True] begin[:]
variable[p] assign[=] call[name[p].getparent, parameter[]]
if compare[name[p].tag equal[==] name[tag]] begin[:]
return[name[p]]
return[constant[None]] | keyword[def] identifier[findTypeParent] ( identifier[element] , identifier[tag] ):
literal[string]
identifier[p] = identifier[element]
keyword[while] keyword[True] :
identifier[p] = identifier[p] . identifier[getparent] ()
keyword[if] identifier[p] . identifier[tag] == identifier[tag] :
keyword[return] identifier[p]
keyword[return] keyword[None] | def findTypeParent(element, tag):
""" Finds fist parent of element of the given type
@param object element: etree element
@param string the tag parent to search for
@return object element: the found parent or None when not found
"""
p = element
while True:
p = p.getparent()
if p.tag == tag:
return p # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# Not found
return None |
def get_item_position(self, analysis_uid):
"""Returns a list with the position for the analysis_uid passed in
within the current worksheet in accordance with the current layout,
where the first item from the list returned is the slot and the second
is the position of the analysis within the slot.
:param analysis_uid: uid of the analysis the position is requested
:return: the position (slot + position within slot) of the analysis
:rtype: list
"""
str_position = self.uids_strpositions.get(analysis_uid, "")
tokens = str_position.split(":")
if len(tokens) != 2:
return None
return [to_int(tokens[0]), to_int(tokens[1])] | def function[get_item_position, parameter[self, analysis_uid]]:
constant[Returns a list with the position for the analysis_uid passed in
within the current worksheet in accordance with the current layout,
where the first item from the list returned is the slot and the second
is the position of the analysis within the slot.
:param analysis_uid: uid of the analysis the position is requested
:return: the position (slot + position within slot) of the analysis
:rtype: list
]
variable[str_position] assign[=] call[name[self].uids_strpositions.get, parameter[name[analysis_uid], constant[]]]
variable[tokens] assign[=] call[name[str_position].split, parameter[constant[:]]]
if compare[call[name[len], parameter[name[tokens]]] not_equal[!=] constant[2]] begin[:]
return[constant[None]]
return[list[[<ast.Call object at 0x7da18f00cf40>, <ast.Call object at 0x7da18f00e950>]]] | keyword[def] identifier[get_item_position] ( identifier[self] , identifier[analysis_uid] ):
literal[string]
identifier[str_position] = identifier[self] . identifier[uids_strpositions] . identifier[get] ( identifier[analysis_uid] , literal[string] )
identifier[tokens] = identifier[str_position] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[tokens] )!= literal[int] :
keyword[return] keyword[None]
keyword[return] [ identifier[to_int] ( identifier[tokens] [ literal[int] ]), identifier[to_int] ( identifier[tokens] [ literal[int] ])] | def get_item_position(self, analysis_uid):
"""Returns a list with the position for the analysis_uid passed in
within the current worksheet in accordance with the current layout,
where the first item from the list returned is the slot and the second
is the position of the analysis within the slot.
:param analysis_uid: uid of the analysis the position is requested
:return: the position (slot + position within slot) of the analysis
:rtype: list
"""
str_position = self.uids_strpositions.get(analysis_uid, '')
tokens = str_position.split(':')
if len(tokens) != 2:
return None # depends on [control=['if'], data=[]]
return [to_int(tokens[0]), to_int(tokens[1])] |
def import_(name, path, mode='import', nodataset=False, brand_opts=None):
'''
Import a zones configuration
name : string
name of the zone
path : string
path of the configuration file to import
mode : string
either import, install, or attach
nodataset : boolean
do not create a ZFS file system
brand_opts : boolean
brand specific options to pass
.. note::
The mode argument can be set to ``import``, ``install``, or ``attach``.
``import``: will only import the configuration
``install``: will import and then try to install the zone
``attach``: will import and then try to attach of the zone
.. code-block:: yaml
omipkg1:
zone.import:
- path: /foo/bar/baz
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
zones = __salt__['zoneadm.list'](installed=True, configured=True)
if name not in zones:
if __opts__['test']:
ret['result'] = True
ret['comment'] = 'Zone {0} was imported from {1}.'.format(
name,
path,
)
ret['changes'][name] = 'imported'
else:
if __salt__['file.file_exists'](path):
res_import = __salt__['zonecfg.import'](name, path)
if not res_import['status']:
ret['result'] = False
ret['comment'] = 'Unable to import zone configuration for {0}!'.format(name)
else:
ret['result'] = True
ret['changes'][name] = 'imported'
ret['comment'] = 'Zone {0} was imported from {1}.'.format(
name,
path,
)
if mode.lower() == 'attach':
res_attach = __salt__['zoneadm.attach'](name, False, brand_opts)
ret['result'] = res_attach['status']
if res_attach['status']:
ret['changes'][name] = 'attached'
ret['comment'] = 'Zone {0} was attached from {1}.'.format(
name,
path,
)
else:
ret['comment'] = []
ret['comment'].append('Failed to attach zone {0} from {1}!'.format(
name,
path,
))
if 'message' in res_attach:
ret['comment'].append(res_attach['message'])
ret['comment'] = "\n".join(ret['comment'])
if mode.lower() == 'install':
res_install = __salt__['zoneadm.install'](name, nodataset, brand_opts)
ret['result'] = res_install['status']
if res_install['status']:
ret['changes'][name] = 'installed'
ret['comment'] = 'Zone {0} was installed from {1}.'.format(
name,
path,
)
else:
ret['comment'] = []
ret['comment'].append('Failed to install zone {0} from {1}!'.format(
name,
path,
))
if 'message' in res_install:
ret['comment'].append(res_install['message'])
ret['comment'] = "\n".join(ret['comment'])
else:
ret['result'] = False
ret['comment'] = 'The file {0} does not exists, unable to import!'.format(path)
else:
## zone exist
ret['result'] = True
ret['comment'] = 'Zone {0} already exists, not importing configuration.'.format(name)
return ret | def function[import_, parameter[name, path, mode, nodataset, brand_opts]]:
constant[
Import a zones configuration
name : string
name of the zone
path : string
path of the configuration file to import
mode : string
either import, install, or attach
nodataset : boolean
do not create a ZFS file system
brand_opts : boolean
brand specific options to pass
.. note::
The mode argument can be set to ``import``, ``install``, or ``attach``.
``import``: will only import the configuration
``install``: will import and then try to install the zone
``attach``: will import and then try to attach of the zone
.. code-block:: yaml
omipkg1:
zone.import:
- path: /foo/bar/baz
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da2047e9060>, <ast.Constant object at 0x7da2047e9e40>, <ast.Constant object at 0x7da2047e8cd0>, <ast.Constant object at 0x7da2047e9300>], [<ast.Name object at 0x7da2047eaa40>, <ast.Dict object at 0x7da2047e9510>, <ast.Constant object at 0x7da2047ea1a0>, <ast.Constant object at 0x7da2047e98d0>]]
variable[zones] assign[=] call[call[name[__salt__]][constant[zoneadm.list]], parameter[]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[zones]] begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Zone {0} was imported from {1}.].format, parameter[name[name], name[path]]]
call[call[name[ret]][constant[changes]]][name[name]] assign[=] constant[imported]
return[name[ret]] | keyword[def] identifier[import_] ( identifier[name] , identifier[path] , identifier[mode] = literal[string] , identifier[nodataset] = keyword[False] , identifier[brand_opts] = keyword[None] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[None] ,
literal[string] : literal[string] }
identifier[zones] = identifier[__salt__] [ literal[string] ]( identifier[installed] = keyword[True] , identifier[configured] = keyword[True] )
keyword[if] identifier[name] keyword[not] keyword[in] identifier[zones] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[name] ,
identifier[path] ,
)
identifier[ret] [ literal[string] ][ identifier[name] ]= literal[string]
keyword[else] :
keyword[if] identifier[__salt__] [ literal[string] ]( identifier[path] ):
identifier[res_import] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[path] )
keyword[if] keyword[not] identifier[res_import] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ][ identifier[name] ]= literal[string]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[name] ,
identifier[path] ,
)
keyword[if] identifier[mode] . identifier[lower] ()== literal[string] :
identifier[res_attach] = identifier[__salt__] [ literal[string] ]( identifier[name] , keyword[False] , identifier[brand_opts] )
identifier[ret] [ literal[string] ]= identifier[res_attach] [ literal[string] ]
keyword[if] identifier[res_attach] [ literal[string] ]:
identifier[ret] [ literal[string] ][ identifier[name] ]= literal[string]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[name] ,
identifier[path] ,
)
keyword[else] :
identifier[ret] [ literal[string] ]=[]
identifier[ret] [ literal[string] ]. identifier[append] ( literal[string] . identifier[format] (
identifier[name] ,
identifier[path] ,
))
keyword[if] literal[string] keyword[in] identifier[res_attach] :
identifier[ret] [ literal[string] ]. identifier[append] ( identifier[res_attach] [ literal[string] ])
identifier[ret] [ literal[string] ]= literal[string] . identifier[join] ( identifier[ret] [ literal[string] ])
keyword[if] identifier[mode] . identifier[lower] ()== literal[string] :
identifier[res_install] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[nodataset] , identifier[brand_opts] )
identifier[ret] [ literal[string] ]= identifier[res_install] [ literal[string] ]
keyword[if] identifier[res_install] [ literal[string] ]:
identifier[ret] [ literal[string] ][ identifier[name] ]= literal[string]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[name] ,
identifier[path] ,
)
keyword[else] :
identifier[ret] [ literal[string] ]=[]
identifier[ret] [ literal[string] ]. identifier[append] ( literal[string] . identifier[format] (
identifier[name] ,
identifier[path] ,
))
keyword[if] literal[string] keyword[in] identifier[res_install] :
identifier[ret] [ literal[string] ]. identifier[append] ( identifier[res_install] [ literal[string] ])
identifier[ret] [ literal[string] ]= literal[string] . identifier[join] ( identifier[ret] [ literal[string] ])
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[path] )
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def import_(name, path, mode='import', nodataset=False, brand_opts=None):
"""
Import a zones configuration
name : string
name of the zone
path : string
path of the configuration file to import
mode : string
either import, install, or attach
nodataset : boolean
do not create a ZFS file system
brand_opts : boolean
brand specific options to pass
.. note::
The mode argument can be set to ``import``, ``install``, or ``attach``.
``import``: will only import the configuration
``install``: will import and then try to install the zone
``attach``: will import and then try to attach of the zone
.. code-block:: yaml
omipkg1:
zone.import:
- path: /foo/bar/baz
"""
ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
zones = __salt__['zoneadm.list'](installed=True, configured=True)
if name not in zones:
if __opts__['test']:
ret['result'] = True
ret['comment'] = 'Zone {0} was imported from {1}.'.format(name, path)
ret['changes'][name] = 'imported' # depends on [control=['if'], data=[]]
elif __salt__['file.file_exists'](path):
res_import = __salt__['zonecfg.import'](name, path)
if not res_import['status']:
ret['result'] = False
ret['comment'] = 'Unable to import zone configuration for {0}!'.format(name) # depends on [control=['if'], data=[]]
else:
ret['result'] = True
ret['changes'][name] = 'imported'
ret['comment'] = 'Zone {0} was imported from {1}.'.format(name, path)
if mode.lower() == 'attach':
res_attach = __salt__['zoneadm.attach'](name, False, brand_opts)
ret['result'] = res_attach['status']
if res_attach['status']:
ret['changes'][name] = 'attached'
ret['comment'] = 'Zone {0} was attached from {1}.'.format(name, path) # depends on [control=['if'], data=[]]
else:
ret['comment'] = []
ret['comment'].append('Failed to attach zone {0} from {1}!'.format(name, path))
if 'message' in res_attach:
ret['comment'].append(res_attach['message']) # depends on [control=['if'], data=['res_attach']]
ret['comment'] = '\n'.join(ret['comment']) # depends on [control=['if'], data=[]]
if mode.lower() == 'install':
res_install = __salt__['zoneadm.install'](name, nodataset, brand_opts)
ret['result'] = res_install['status']
if res_install['status']:
ret['changes'][name] = 'installed'
ret['comment'] = 'Zone {0} was installed from {1}.'.format(name, path) # depends on [control=['if'], data=[]]
else:
ret['comment'] = []
ret['comment'].append('Failed to install zone {0} from {1}!'.format(name, path))
if 'message' in res_install:
ret['comment'].append(res_install['message']) # depends on [control=['if'], data=['res_install']]
ret['comment'] = '\n'.join(ret['comment']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'The file {0} does not exists, unable to import!'.format(path) # depends on [control=['if'], data=['name']]
else:
## zone exist
ret['result'] = True
ret['comment'] = 'Zone {0} already exists, not importing configuration.'.format(name)
return ret |
def _check_rules(browser, rules_js, config):
"""
Run an accessibility audit on the page using the axe-core ruleset.
Args:
browser: a browser instance.
rules_js: the ruleset JavaScript as a string.
config: an AxsAuditConfig instance.
Returns:
A list of violations.
Related documentation:
https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object
__Caution__: You probably don't really want to call this method
directly! It will be used by `AxeCoreAudit.do_audit`.
"""
audit_run_script = dedent(u"""
{rules_js}
{custom_rules}
axe.configure(customRules);
var callback = function(err, results) {{
if (err) throw err;
window.a11yAuditResults = JSON.stringify(results);
window.console.log(window.a11yAuditResults);
}}
axe.run({context}, {options}, callback);
""").format(
rules_js=rules_js,
custom_rules=config.custom_rules,
context=config.context,
options=config.rules
)
audit_results_script = dedent(u"""
window.console.log(window.a11yAuditResults);
return window.a11yAuditResults;
""")
browser.execute_script(audit_run_script)
def audit_results_check_func():
"""
A method to check that the audit has completed.
Returns:
(True, results) if the results are available.
(False, None) if the results aren't available.
"""
unicode_results = browser.execute_script(audit_results_script)
try:
results = json.loads(unicode_results)
except (TypeError, ValueError):
results = None
if results:
return True, results
return False, None
result = Promise(
audit_results_check_func,
"Timed out waiting for a11y audit results.",
timeout=5,
).fulfill()
# audit_results is report of accessibility violations for that session
# Note that this ruleset doesn't have distinct error/warning levels.
audit_results = result.get('violations')
return audit_results | def function[_check_rules, parameter[browser, rules_js, config]]:
constant[
Run an accessibility audit on the page using the axe-core ruleset.
Args:
browser: a browser instance.
rules_js: the ruleset JavaScript as a string.
config: an AxsAuditConfig instance.
Returns:
A list of violations.
Related documentation:
https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object
__Caution__: You probably don't really want to call this method
directly! It will be used by `AxeCoreAudit.do_audit`.
]
variable[audit_run_script] assign[=] call[call[name[dedent], parameter[constant[
{rules_js}
{custom_rules}
axe.configure(customRules);
var callback = function(err, results) {{
if (err) throw err;
window.a11yAuditResults = JSON.stringify(results);
window.console.log(window.a11yAuditResults);
}}
axe.run({context}, {options}, callback);
]]].format, parameter[]]
variable[audit_results_script] assign[=] call[name[dedent], parameter[constant[
window.console.log(window.a11yAuditResults);
return window.a11yAuditResults;
]]]
call[name[browser].execute_script, parameter[name[audit_run_script]]]
def function[audit_results_check_func, parameter[]]:
constant[
A method to check that the audit has completed.
Returns:
(True, results) if the results are available.
(False, None) if the results aren't available.
]
variable[unicode_results] assign[=] call[name[browser].execute_script, parameter[name[audit_results_script]]]
<ast.Try object at 0x7da20c76d480>
if name[results] begin[:]
return[tuple[[<ast.Constant object at 0x7da20c76cd00>, <ast.Name object at 0x7da20c76cf40>]]]
return[tuple[[<ast.Constant object at 0x7da20c76e380>, <ast.Constant object at 0x7da20c76ceb0>]]]
variable[result] assign[=] call[call[name[Promise], parameter[name[audit_results_check_func], constant[Timed out waiting for a11y audit results.]]].fulfill, parameter[]]
variable[audit_results] assign[=] call[name[result].get, parameter[constant[violations]]]
return[name[audit_results]] | keyword[def] identifier[_check_rules] ( identifier[browser] , identifier[rules_js] , identifier[config] ):
literal[string]
identifier[audit_run_script] = identifier[dedent] ( literal[string] ). identifier[format] (
identifier[rules_js] = identifier[rules_js] ,
identifier[custom_rules] = identifier[config] . identifier[custom_rules] ,
identifier[context] = identifier[config] . identifier[context] ,
identifier[options] = identifier[config] . identifier[rules]
)
identifier[audit_results_script] = identifier[dedent] ( literal[string] )
identifier[browser] . identifier[execute_script] ( identifier[audit_run_script] )
keyword[def] identifier[audit_results_check_func] ():
literal[string]
identifier[unicode_results] = identifier[browser] . identifier[execute_script] ( identifier[audit_results_script] )
keyword[try] :
identifier[results] = identifier[json] . identifier[loads] ( identifier[unicode_results] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
identifier[results] = keyword[None]
keyword[if] identifier[results] :
keyword[return] keyword[True] , identifier[results]
keyword[return] keyword[False] , keyword[None]
identifier[result] = identifier[Promise] (
identifier[audit_results_check_func] ,
literal[string] ,
identifier[timeout] = literal[int] ,
). identifier[fulfill] ()
identifier[audit_results] = identifier[result] . identifier[get] ( literal[string] )
keyword[return] identifier[audit_results] | def _check_rules(browser, rules_js, config):
"""
Run an accessibility audit on the page using the axe-core ruleset.
Args:
browser: a browser instance.
rules_js: the ruleset JavaScript as a string.
config: an AxsAuditConfig instance.
Returns:
A list of violations.
Related documentation:
https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object
__Caution__: You probably don't really want to call this method
directly! It will be used by `AxeCoreAudit.do_audit`.
"""
audit_run_script = dedent(u'\n {rules_js}\n {custom_rules}\n axe.configure(customRules);\n var callback = function(err, results) {{\n if (err) throw err;\n window.a11yAuditResults = JSON.stringify(results);\n window.console.log(window.a11yAuditResults);\n }}\n axe.run({context}, {options}, callback);\n ').format(rules_js=rules_js, custom_rules=config.custom_rules, context=config.context, options=config.rules)
audit_results_script = dedent(u'\n window.console.log(window.a11yAuditResults);\n return window.a11yAuditResults;\n ')
browser.execute_script(audit_run_script)
def audit_results_check_func():
"""
A method to check that the audit has completed.
Returns:
(True, results) if the results are available.
(False, None) if the results aren't available.
"""
unicode_results = browser.execute_script(audit_results_script)
try:
results = json.loads(unicode_results) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
results = None # depends on [control=['except'], data=[]]
if results:
return (True, results) # depends on [control=['if'], data=[]]
return (False, None)
result = Promise(audit_results_check_func, 'Timed out waiting for a11y audit results.', timeout=5).fulfill()
# audit_results is report of accessibility violations for that session
# Note that this ruleset doesn't have distinct error/warning levels.
audit_results = result.get('violations')
return audit_results |
def retrieve_file_handles_of_same_dataset(self, **args):
'''
:return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed.
'''
mandatory_args = ['drs_id', 'version_number', 'data_node', 'prefix']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
self.__reset_error_messages()
# Try plan A
file_handles = None
try:
file_handles = self.__strategy1(args) # can raise SolrError or SolrSwitchedOff, but can't return None
except esgfpid.exceptions.SolrError as e:
self.__error_messages.append('Error during first query: '+e.message)
if file_handles is not None and len(file_handles)>0:
LOGGER.debug('Retrieved file handles from solr in first query.')
return file_handles
# Try plan B
try:
file_handles = self.__strategy2(args) # can raise SolrError or SolrSwitchedOff, but can't return None
except esgfpid.exceptions.SolrError as e:
self.__error_messages.append('Error during second query: '+e.message)
msg = '/n'.join(self.__error_messages)
raise esgfpid.exceptions.SolrError('Failure in both queries. Messages:\n'+msg)
return file_handles | def function[retrieve_file_handles_of_same_dataset, parameter[self]]:
constant[
:return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed.
]
variable[mandatory_args] assign[=] list[[<ast.Constant object at 0x7da1b1ec10f0>, <ast.Constant object at 0x7da1b1ec2920>, <ast.Constant object at 0x7da1b1ec1810>, <ast.Constant object at 0x7da1b1ec1b40>]]
call[name[esgfpid].utils.check_presence_of_mandatory_args, parameter[name[args], name[mandatory_args]]]
call[name[self].__reset_error_messages, parameter[]]
variable[file_handles] assign[=] constant[None]
<ast.Try object at 0x7da1b1ec38b0>
if <ast.BoolOp object at 0x7da1b1ec3010> begin[:]
call[name[LOGGER].debug, parameter[constant[Retrieved file handles from solr in first query.]]]
return[name[file_handles]]
<ast.Try object at 0x7da1b1ec0d90>
return[name[file_handles]] | keyword[def] identifier[retrieve_file_handles_of_same_dataset] ( identifier[self] ,** identifier[args] ):
literal[string]
identifier[mandatory_args] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[esgfpid] . identifier[utils] . identifier[check_presence_of_mandatory_args] ( identifier[args] , identifier[mandatory_args] )
identifier[self] . identifier[__reset_error_messages] ()
identifier[file_handles] = keyword[None]
keyword[try] :
identifier[file_handles] = identifier[self] . identifier[__strategy1] ( identifier[args] )
keyword[except] identifier[esgfpid] . identifier[exceptions] . identifier[SolrError] keyword[as] identifier[e] :
identifier[self] . identifier[__error_messages] . identifier[append] ( literal[string] + identifier[e] . identifier[message] )
keyword[if] identifier[file_handles] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[file_handles] )> literal[int] :
identifier[LOGGER] . identifier[debug] ( literal[string] )
keyword[return] identifier[file_handles]
keyword[try] :
identifier[file_handles] = identifier[self] . identifier[__strategy2] ( identifier[args] )
keyword[except] identifier[esgfpid] . identifier[exceptions] . identifier[SolrError] keyword[as] identifier[e] :
identifier[self] . identifier[__error_messages] . identifier[append] ( literal[string] + identifier[e] . identifier[message] )
identifier[msg] = literal[string] . identifier[join] ( identifier[self] . identifier[__error_messages] )
keyword[raise] identifier[esgfpid] . identifier[exceptions] . identifier[SolrError] ( literal[string] + identifier[msg] )
keyword[return] identifier[file_handles] | def retrieve_file_handles_of_same_dataset(self, **args):
"""
:return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed.
"""
mandatory_args = ['drs_id', 'version_number', 'data_node', 'prefix']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
self.__reset_error_messages()
# Try plan A
file_handles = None
try:
file_handles = self.__strategy1(args) # can raise SolrError or SolrSwitchedOff, but can't return None # depends on [control=['try'], data=[]]
except esgfpid.exceptions.SolrError as e:
self.__error_messages.append('Error during first query: ' + e.message) # depends on [control=['except'], data=['e']]
if file_handles is not None and len(file_handles) > 0:
LOGGER.debug('Retrieved file handles from solr in first query.')
return file_handles # depends on [control=['if'], data=[]]
# Try plan B
try:
file_handles = self.__strategy2(args) # can raise SolrError or SolrSwitchedOff, but can't return None # depends on [control=['try'], data=[]]
except esgfpid.exceptions.SolrError as e:
self.__error_messages.append('Error during second query: ' + e.message)
msg = '/n'.join(self.__error_messages)
raise esgfpid.exceptions.SolrError('Failure in both queries. Messages:\n' + msg) # depends on [control=['except'], data=['e']]
return file_handles |
def dataset_search(self, dataset_returning_query):
"""
Run a dataset query against Citrination.
:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.
:type dataset_returning_query: :class:`DatasetReturningQuery`
:return: Dataset search result object with the results of the query.
:rtype: :class:`DatasetSearchResult`
"""
self._validate_search_query(dataset_returning_query)
return self._execute_search_query(
dataset_returning_query,
DatasetSearchResult
) | def function[dataset_search, parameter[self, dataset_returning_query]]:
constant[
Run a dataset query against Citrination.
:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.
:type dataset_returning_query: :class:`DatasetReturningQuery`
:return: Dataset search result object with the results of the query.
:rtype: :class:`DatasetSearchResult`
]
call[name[self]._validate_search_query, parameter[name[dataset_returning_query]]]
return[call[name[self]._execute_search_query, parameter[name[dataset_returning_query], name[DatasetSearchResult]]]] | keyword[def] identifier[dataset_search] ( identifier[self] , identifier[dataset_returning_query] ):
literal[string]
identifier[self] . identifier[_validate_search_query] ( identifier[dataset_returning_query] )
keyword[return] identifier[self] . identifier[_execute_search_query] (
identifier[dataset_returning_query] ,
identifier[DatasetSearchResult]
) | def dataset_search(self, dataset_returning_query):
"""
Run a dataset query against Citrination.
:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.
:type dataset_returning_query: :class:`DatasetReturningQuery`
:return: Dataset search result object with the results of the query.
:rtype: :class:`DatasetSearchResult`
"""
self._validate_search_query(dataset_returning_query)
return self._execute_search_query(dataset_returning_query, DatasetSearchResult) |
def convert_to_geoff(discoursegraph):
"""
Parameters
----------
discoursegraph : DiscourseDocumentGraph
the discourse document graph to be converted into GEOFF format
Returns
-------
geoff : string
a geoff string representation of the discourse graph.
"""
dg_copy = deepcopy(discoursegraph)
layerset2list(dg_copy)
add_node_ids_as_labels(dg_copy)
return graph2geoff(dg_copy, 'LINKS_TO') | def function[convert_to_geoff, parameter[discoursegraph]]:
constant[
Parameters
----------
discoursegraph : DiscourseDocumentGraph
the discourse document graph to be converted into GEOFF format
Returns
-------
geoff : string
a geoff string representation of the discourse graph.
]
variable[dg_copy] assign[=] call[name[deepcopy], parameter[name[discoursegraph]]]
call[name[layerset2list], parameter[name[dg_copy]]]
call[name[add_node_ids_as_labels], parameter[name[dg_copy]]]
return[call[name[graph2geoff], parameter[name[dg_copy], constant[LINKS_TO]]]] | keyword[def] identifier[convert_to_geoff] ( identifier[discoursegraph] ):
literal[string]
identifier[dg_copy] = identifier[deepcopy] ( identifier[discoursegraph] )
identifier[layerset2list] ( identifier[dg_copy] )
identifier[add_node_ids_as_labels] ( identifier[dg_copy] )
keyword[return] identifier[graph2geoff] ( identifier[dg_copy] , literal[string] ) | def convert_to_geoff(discoursegraph):
"""
Parameters
----------
discoursegraph : DiscourseDocumentGraph
the discourse document graph to be converted into GEOFF format
Returns
-------
geoff : string
a geoff string representation of the discourse graph.
"""
dg_copy = deepcopy(discoursegraph)
layerset2list(dg_copy)
add_node_ids_as_labels(dg_copy)
return graph2geoff(dg_copy, 'LINKS_TO') |
def get_cluster_assignment(self):
    """Fetch the cluster layout in form of assignment from zookeeper.

    :returns: dict mapping ``(topic, partition)`` tuples to the list of
        replicas for that partition, as reported by
        ``self.get_cluster_plan()``.
    """
    plan = self.get_cluster_plan()
    # Dict comprehension instead of a manual loop-and-assign: one
    # expression, no mutable accumulator (ruff PERF403).
    return {
        (elem['topic'], elem['partition']): elem['replicas']
        for elem in plan['partitions']
    }
constant[Fetch the cluster layout in form of assignment from zookeeper]
variable[plan] assign[=] call[name[self].get_cluster_plan, parameter[]]
variable[assignment] assign[=] dictionary[[], []]
for taget[name[elem]] in starred[call[name[plan]][constant[partitions]]] begin[:]
call[name[assignment]][tuple[[<ast.Subscript object at 0x7da1b07880d0>, <ast.Subscript object at 0x7da1b0788b50>]]] assign[=] call[name[elem]][constant[replicas]]
return[name[assignment]] | keyword[def] identifier[get_cluster_assignment] ( identifier[self] ):
literal[string]
identifier[plan] = identifier[self] . identifier[get_cluster_plan] ()
identifier[assignment] ={}
keyword[for] identifier[elem] keyword[in] identifier[plan] [ literal[string] ]:
identifier[assignment] [
( identifier[elem] [ literal[string] ], identifier[elem] [ literal[string] ])
]= identifier[elem] [ literal[string] ]
keyword[return] identifier[assignment] | def get_cluster_assignment(self):
"""Fetch the cluster layout in form of assignment from zookeeper"""
plan = self.get_cluster_plan()
assignment = {}
for elem in plan['partitions']:
assignment[elem['topic'], elem['partition']] = elem['replicas'] # depends on [control=['for'], data=['elem']]
return assignment |
def stage_import_from_filesystem(self, filepath):
    """Stage an import from a filesystem path.
    :param filepath: Local filesystem path as string.
    :return: :class:`imports.Import <imports.Import>` object
    """
    # POST the path to the staging endpoint and deserialize the
    # service's response through the Import schema.
    response = self.service.post(self.base, params={'path': filepath})
    return self.service.decode(ImportSchema(), response)
constant[Stage an import from a filesystem path.
:param filepath: Local filesystem path as string.
:return: :class:`imports.Import <imports.Import>` object
]
variable[schema] assign[=] call[name[ImportSchema], parameter[]]
variable[resp] assign[=] call[name[self].service.post, parameter[name[self].base]]
return[call[name[self].service.decode, parameter[name[schema], name[resp]]]] | keyword[def] identifier[stage_import_from_filesystem] ( identifier[self] , identifier[filepath] ):
literal[string]
identifier[schema] = identifier[ImportSchema] ()
identifier[resp] = identifier[self] . identifier[service] . identifier[post] ( identifier[self] . identifier[base] ,
identifier[params] ={ literal[string] : identifier[filepath] })
keyword[return] identifier[self] . identifier[service] . identifier[decode] ( identifier[schema] , identifier[resp] ) | def stage_import_from_filesystem(self, filepath):
"""Stage an import from a filesystem path.
:param filepath: Local filesystem path as string.
:return: :class:`imports.Import <imports.Import>` object
"""
schema = ImportSchema()
resp = self.service.post(self.base, params={'path': filepath})
return self.service.decode(schema, resp) |
def _dict_subset(keys, master_dict):
'''
Return a dictionary of only the subset of keys/values specified in keys
'''
return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys]) | def function[_dict_subset, parameter[keys, master_dict]]:
constant[
Return a dictionary of only the subset of keys/values specified in keys
]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da20c6c6440>]]] | keyword[def] identifier[_dict_subset] ( identifier[keys] , identifier[master_dict] ):
literal[string]
keyword[return] identifier[dict] ([( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[master_dict] ) keyword[if] identifier[k] keyword[in] identifier[keys] ]) | def _dict_subset(keys, master_dict):
"""
Return a dictionary of only the subset of keys/values specified in keys
"""
return dict([(k, v) for (k, v) in six.iteritems(master_dict) if k in keys]) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.