code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def str_on_2_unicode_on_3(s):
"""
argparse is way too awesome when doing repr() on choices when printing usage
:param s: str or unicode
:return: str on 2, unicode on 3
"""
if not PY3:
return str(s)
else: # 3+
if not isinstance(s, str):
return str(s, encoding="utf-8")
return s | def function[str_on_2_unicode_on_3, parameter[s]]:
constant[
argparse is way too awesome when doing repr() on choices when printing usage
:param s: str or unicode
:return: str on 2, unicode on 3
]
if <ast.UnaryOp object at 0x7da1b0e0f0a0> begin[:]
return[call[name[str], parameter[name[s]]]] | keyword[def] identifier[str_on_2_unicode_on_3] ( identifier[s] ):
literal[string]
keyword[if] keyword[not] identifier[PY3] :
keyword[return] identifier[str] ( identifier[s] )
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[s] , identifier[str] ):
keyword[return] identifier[str] ( identifier[s] , identifier[encoding] = literal[string] )
keyword[return] identifier[s] | def str_on_2_unicode_on_3(s):
"""
argparse is way too awesome when doing repr() on choices when printing usage
:param s: str or unicode
:return: str on 2, unicode on 3
"""
if not PY3:
return str(s) # depends on [control=['if'], data=[]]
else: # 3+
if not isinstance(s, str):
return str(s, encoding='utf-8') # depends on [control=['if'], data=[]]
return s |
def process_command(self, command):
'''Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use
'''
result = ScubaContext()
result.script = None
result.image = self.image
result.entrypoint = self.entrypoint
result.environment = self.environment.copy()
if command:
alias = self.aliases.get(command[0])
if not alias:
# Command is not an alias; use it as-is.
result.script = [shell_quote_cmd(command)]
else:
# Using an alias
# Does this alias override the image and/or entrypoint?
if alias.image:
result.image = alias.image
if alias.entrypoint is not None:
result.entrypoint = alias.entrypoint
# Merge/override the environment
if alias.environment:
result.environment.update(alias.environment)
if len(alias.script) > 1:
# Alias is a multiline script; no additional
# arguments are allowed in the scuba invocation.
if len(command) > 1:
raise ConfigError('Additional arguments not allowed with multi-line aliases')
result.script = alias.script
else:
# Alias is a single-line script; perform substituion
# and add user arguments.
command.pop(0)
result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
result.script = flatten_list(result.script)
return result | def function[process_command, parameter[self, command]]:
constant[Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use
]
variable[result] assign[=] call[name[ScubaContext], parameter[]]
name[result].script assign[=] constant[None]
name[result].image assign[=] name[self].image
name[result].entrypoint assign[=] name[self].entrypoint
name[result].environment assign[=] call[name[self].environment.copy, parameter[]]
if name[command] begin[:]
variable[alias] assign[=] call[name[self].aliases.get, parameter[call[name[command]][constant[0]]]]
if <ast.UnaryOp object at 0x7da1b0dcb760> begin[:]
name[result].script assign[=] list[[<ast.Call object at 0x7da1b0dcb640>]]
name[result].script assign[=] call[name[flatten_list], parameter[name[result].script]]
return[name[result]] | keyword[def] identifier[process_command] ( identifier[self] , identifier[command] ):
literal[string]
identifier[result] = identifier[ScubaContext] ()
identifier[result] . identifier[script] = keyword[None]
identifier[result] . identifier[image] = identifier[self] . identifier[image]
identifier[result] . identifier[entrypoint] = identifier[self] . identifier[entrypoint]
identifier[result] . identifier[environment] = identifier[self] . identifier[environment] . identifier[copy] ()
keyword[if] identifier[command] :
identifier[alias] = identifier[self] . identifier[aliases] . identifier[get] ( identifier[command] [ literal[int] ])
keyword[if] keyword[not] identifier[alias] :
identifier[result] . identifier[script] =[ identifier[shell_quote_cmd] ( identifier[command] )]
keyword[else] :
keyword[if] identifier[alias] . identifier[image] :
identifier[result] . identifier[image] = identifier[alias] . identifier[image]
keyword[if] identifier[alias] . identifier[entrypoint] keyword[is] keyword[not] keyword[None] :
identifier[result] . identifier[entrypoint] = identifier[alias] . identifier[entrypoint]
keyword[if] identifier[alias] . identifier[environment] :
identifier[result] . identifier[environment] . identifier[update] ( identifier[alias] . identifier[environment] )
keyword[if] identifier[len] ( identifier[alias] . identifier[script] )> literal[int] :
keyword[if] identifier[len] ( identifier[command] )> literal[int] :
keyword[raise] identifier[ConfigError] ( literal[string] )
identifier[result] . identifier[script] = identifier[alias] . identifier[script]
keyword[else] :
identifier[command] . identifier[pop] ( literal[int] )
identifier[result] . identifier[script] =[ identifier[alias] . identifier[script] [ literal[int] ]+ literal[string] + identifier[shell_quote_cmd] ( identifier[command] )]
identifier[result] . identifier[script] = identifier[flatten_list] ( identifier[result] . identifier[script] )
keyword[return] identifier[result] | def process_command(self, command):
"""Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use
"""
result = ScubaContext()
result.script = None
result.image = self.image
result.entrypoint = self.entrypoint
result.environment = self.environment.copy()
if command:
alias = self.aliases.get(command[0])
if not alias:
# Command is not an alias; use it as-is.
result.script = [shell_quote_cmd(command)] # depends on [control=['if'], data=[]]
else:
# Using an alias
# Does this alias override the image and/or entrypoint?
if alias.image:
result.image = alias.image # depends on [control=['if'], data=[]]
if alias.entrypoint is not None:
result.entrypoint = alias.entrypoint # depends on [control=['if'], data=[]]
# Merge/override the environment
if alias.environment:
result.environment.update(alias.environment) # depends on [control=['if'], data=[]]
if len(alias.script) > 1:
# Alias is a multiline script; no additional
# arguments are allowed in the scuba invocation.
if len(command) > 1:
raise ConfigError('Additional arguments not allowed with multi-line aliases') # depends on [control=['if'], data=[]]
result.script = alias.script # depends on [control=['if'], data=[]]
else:
# Alias is a single-line script; perform substituion
# and add user arguments.
command.pop(0)
result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
result.script = flatten_list(result.script) # depends on [control=['if'], data=[]]
return result |
def install(self, release_id):
"""Install the contents of the local directory into a release directory.
If the directory for the given release ID does not exist on the remote
system, it will be created. The directory will be created according to
the standard Tunic directory structure (see :doc:`design`).
Note that the name and path of the local directory is irrelevant, only
the contents of the specified directory will be transferred to the remote
server. The contents will end up as children of the release directory on
the remote server.
:param str release_id: Timestamp-based identifier for this
deployment. If this ID corresponds to a directory that already
exists, contents of the local directory will be copied into
this directory.
:return: The results of the ``put`` command using Fabric. This return
value is an iterable of the paths of all files uploaded on the remote
server.
"""
release_path = os.path.join(self._releases, release_id)
if not self._runner.exists(release_path):
self._runner.run("mkdir -p '{0}'".format(release_path))
# Make sure to remove any user supplied globs or trailing slashes
# so that we can ensure exactly the glob behavior we want from the
# put command.
local_path = self._local_path.strip('*').strip(os.path.sep)
return self._runner.put(os.path.join(local_path, '*'), release_path) | def function[install, parameter[self, release_id]]:
constant[Install the contents of the local directory into a release directory.
If the directory for the given release ID does not exist on the remote
system, it will be created. The directory will be created according to
the standard Tunic directory structure (see :doc:`design`).
Note that the name and path of the local directory is irrelevant, only
the contents of the specified directory will be transferred to the remote
server. The contents will end up as children of the release directory on
the remote server.
:param str release_id: Timestamp-based identifier for this
deployment. If this ID corresponds to a directory that already
exists, contents of the local directory will be copied into
this directory.
:return: The results of the ``put`` command using Fabric. This return
value is an iterable of the paths of all files uploaded on the remote
server.
]
variable[release_path] assign[=] call[name[os].path.join, parameter[name[self]._releases, name[release_id]]]
if <ast.UnaryOp object at 0x7da1b168dcf0> begin[:]
call[name[self]._runner.run, parameter[call[constant[mkdir -p '{0}'].format, parameter[name[release_path]]]]]
variable[local_path] assign[=] call[call[name[self]._local_path.strip, parameter[constant[*]]].strip, parameter[name[os].path.sep]]
return[call[name[self]._runner.put, parameter[call[name[os].path.join, parameter[name[local_path], constant[*]]], name[release_path]]]] | keyword[def] identifier[install] ( identifier[self] , identifier[release_id] ):
literal[string]
identifier[release_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_releases] , identifier[release_id] )
keyword[if] keyword[not] identifier[self] . identifier[_runner] . identifier[exists] ( identifier[release_path] ):
identifier[self] . identifier[_runner] . identifier[run] ( literal[string] . identifier[format] ( identifier[release_path] ))
identifier[local_path] = identifier[self] . identifier[_local_path] . identifier[strip] ( literal[string] ). identifier[strip] ( identifier[os] . identifier[path] . identifier[sep] )
keyword[return] identifier[self] . identifier[_runner] . identifier[put] ( identifier[os] . identifier[path] . identifier[join] ( identifier[local_path] , literal[string] ), identifier[release_path] ) | def install(self, release_id):
"""Install the contents of the local directory into a release directory.
If the directory for the given release ID does not exist on the remote
system, it will be created. The directory will be created according to
the standard Tunic directory structure (see :doc:`design`).
Note that the name and path of the local directory is irrelevant, only
the contents of the specified directory will be transferred to the remote
server. The contents will end up as children of the release directory on
the remote server.
:param str release_id: Timestamp-based identifier for this
deployment. If this ID corresponds to a directory that already
exists, contents of the local directory will be copied into
this directory.
:return: The results of the ``put`` command using Fabric. This return
value is an iterable of the paths of all files uploaded on the remote
server.
"""
release_path = os.path.join(self._releases, release_id)
if not self._runner.exists(release_path):
self._runner.run("mkdir -p '{0}'".format(release_path)) # depends on [control=['if'], data=[]]
# Make sure to remove any user supplied globs or trailing slashes
# so that we can ensure exactly the glob behavior we want from the
# put command.
local_path = self._local_path.strip('*').strip(os.path.sep)
return self._runner.put(os.path.join(local_path, '*'), release_path) |
def set_chime_volume(self, volume):
"""
:param volume: one of [low, medium, high]
"""
values = {
"desired_state": {
"chime_volume": volume
}
}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) | def function[set_chime_volume, parameter[self, volume]]:
constant[
:param volume: one of [low, medium, high]
]
variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da1b2633040>], [<ast.Dict object at 0x7da1b2630b20>]]
variable[response] assign[=] call[name[self].api_interface.set_device_state, parameter[name[self], name[values]]]
call[name[self]._update_state_from_response, parameter[name[response]]] | keyword[def] identifier[set_chime_volume] ( identifier[self] , identifier[volume] ):
literal[string]
identifier[values] ={
literal[string] :{
literal[string] : identifier[volume]
}
}
identifier[response] = identifier[self] . identifier[api_interface] . identifier[set_device_state] ( identifier[self] , identifier[values] )
identifier[self] . identifier[_update_state_from_response] ( identifier[response] ) | def set_chime_volume(self, volume):
"""
:param volume: one of [low, medium, high]
"""
values = {'desired_state': {'chime_volume': volume}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) |
def string_to_one_hot(string, maxchar):
"""Converts an ASCII string to a one-of-k encoding."""
ascii = np.array([ord(c) for c in string]).T
return np.array(ascii[:,None] == np.arange(maxchar)[None, :], dtype=int) | def function[string_to_one_hot, parameter[string, maxchar]]:
constant[Converts an ASCII string to a one-of-k encoding.]
variable[ascii] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da2054a5f90>]].T
return[call[name[np].array, parameter[compare[call[name[ascii]][tuple[[<ast.Slice object at 0x7da2054a4100>, <ast.Constant object at 0x7da2054a7880>]]] equal[==] call[call[name[np].arange, parameter[name[maxchar]]]][tuple[[<ast.Constant object at 0x7da2054a6860>, <ast.Slice object at 0x7da2054a72b0>]]]]]]] | keyword[def] identifier[string_to_one_hot] ( identifier[string] , identifier[maxchar] ):
literal[string]
identifier[ascii] = identifier[np] . identifier[array] ([ identifier[ord] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[string] ]). identifier[T]
keyword[return] identifier[np] . identifier[array] ( identifier[ascii] [:, keyword[None] ]== identifier[np] . identifier[arange] ( identifier[maxchar] )[ keyword[None] ,:], identifier[dtype] = identifier[int] ) | def string_to_one_hot(string, maxchar):
"""Converts an ASCII string to a one-of-k encoding."""
ascii = np.array([ord(c) for c in string]).T
return np.array(ascii[:, None] == np.arange(maxchar)[None, :], dtype=int) |
def corba_name_to_string(name):
'''Convert a CORBA CosNaming.Name to a string.'''
parts = []
if type(name) is not list and type(name) is not tuple:
raise NotCORBANameError(name)
if len(name) == 0:
raise NotCORBANameError(name)
for nc in name:
if not nc.kind:
parts.append(nc.id)
else:
parts.append('{0}.{1}'.format(nc.id, nc.kind))
return '/'.join(parts) | def function[corba_name_to_string, parameter[name]]:
constant[Convert a CORBA CosNaming.Name to a string.]
variable[parts] assign[=] list[[]]
if <ast.BoolOp object at 0x7da18dc9a2f0> begin[:]
<ast.Raise object at 0x7da18bc71360>
if compare[call[name[len], parameter[name[name]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18bc73910>
for taget[name[nc]] in starred[name[name]] begin[:]
if <ast.UnaryOp object at 0x7da18bc735b0> begin[:]
call[name[parts].append, parameter[name[nc].id]]
return[call[constant[/].join, parameter[name[parts]]]] | keyword[def] identifier[corba_name_to_string] ( identifier[name] ):
literal[string]
identifier[parts] =[]
keyword[if] identifier[type] ( identifier[name] ) keyword[is] keyword[not] identifier[list] keyword[and] identifier[type] ( identifier[name] ) keyword[is] keyword[not] identifier[tuple] :
keyword[raise] identifier[NotCORBANameError] ( identifier[name] )
keyword[if] identifier[len] ( identifier[name] )== literal[int] :
keyword[raise] identifier[NotCORBANameError] ( identifier[name] )
keyword[for] identifier[nc] keyword[in] identifier[name] :
keyword[if] keyword[not] identifier[nc] . identifier[kind] :
identifier[parts] . identifier[append] ( identifier[nc] . identifier[id] )
keyword[else] :
identifier[parts] . identifier[append] ( literal[string] . identifier[format] ( identifier[nc] . identifier[id] , identifier[nc] . identifier[kind] ))
keyword[return] literal[string] . identifier[join] ( identifier[parts] ) | def corba_name_to_string(name):
"""Convert a CORBA CosNaming.Name to a string."""
parts = []
if type(name) is not list and type(name) is not tuple:
raise NotCORBANameError(name) # depends on [control=['if'], data=[]]
if len(name) == 0:
raise NotCORBANameError(name) # depends on [control=['if'], data=[]]
for nc in name:
if not nc.kind:
parts.append(nc.id) # depends on [control=['if'], data=[]]
else:
parts.append('{0}.{1}'.format(nc.id, nc.kind)) # depends on [control=['for'], data=['nc']]
return '/'.join(parts) |
def update_file(finfo, sample_info, config, pass_uptodate=False):
"""Update the file in local filesystem storage.
"""
storage_dir = utils.safe_makedir(_get_storage_dir(finfo, config))
if finfo.get("type") == "directory":
return _copy_finfo_directory(finfo, storage_dir)
else:
return _copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate) | def function[update_file, parameter[finfo, sample_info, config, pass_uptodate]]:
constant[Update the file in local filesystem storage.
]
variable[storage_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[_get_storage_dir], parameter[name[finfo], name[config]]]]]
if compare[call[name[finfo].get, parameter[constant[type]]] equal[==] constant[directory]] begin[:]
return[call[name[_copy_finfo_directory], parameter[name[finfo], name[storage_dir]]]] | keyword[def] identifier[update_file] ( identifier[finfo] , identifier[sample_info] , identifier[config] , identifier[pass_uptodate] = keyword[False] ):
literal[string]
identifier[storage_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[_get_storage_dir] ( identifier[finfo] , identifier[config] ))
keyword[if] identifier[finfo] . identifier[get] ( literal[string] )== literal[string] :
keyword[return] identifier[_copy_finfo_directory] ( identifier[finfo] , identifier[storage_dir] )
keyword[else] :
keyword[return] identifier[_copy_finfo] ( identifier[finfo] , identifier[storage_dir] , identifier[pass_uptodate] = identifier[pass_uptodate] ) | def update_file(finfo, sample_info, config, pass_uptodate=False):
"""Update the file in local filesystem storage.
"""
storage_dir = utils.safe_makedir(_get_storage_dir(finfo, config))
if finfo.get('type') == 'directory':
return _copy_finfo_directory(finfo, storage_dir) # depends on [control=['if'], data=[]]
else:
return _copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate) |
def fetch_friends(self, user, paginate=False):
"""
fethces friends from facebook using the oauth_token
fethched by django-social-auth.
Note - user isn't a user - it's a UserSocialAuth if using social auth, or a SocialAccount if using allauth
Returns:
collection of friend objects fetched from facebook
"""
if USING_ALLAUTH:
social_app = SocialApp.objects.get_current('facebook')
oauth_token = SocialToken.objects.get(account=user, app=social_app).token
else:
social_auth_backend = FacebookBackend()
# Get the access_token
tokens = social_auth_backend.tokens(user)
oauth_token = tokens['access_token']
graph = facebook.GraphAPI(oauth_token)
friends = graph.get_connections("me", "friends")
if paginate:
total_friends = friends.copy()
total_friends.pop('paging')
while 'paging' in friends and 'next' in friends['paging'] and friends['paging']['next']:
next_url = friends['paging']['next']
next_url_parsed = urlparse.urlparse(next_url)
query_data = urlparse.parse_qs(next_url_parsed.query)
query_data.pop('access_token')
for k, v in query_data.items():
query_data[k] = v[0]
friends = graph.get_connections("me", "friends", **query_data)
total_friends['data'] = sum([total_friends['data'], friends['data']], [])
else:
total_friends = friends
return total_friends | def function[fetch_friends, parameter[self, user, paginate]]:
constant[
fethces friends from facebook using the oauth_token
fethched by django-social-auth.
Note - user isn't a user - it's a UserSocialAuth if using social auth, or a SocialAccount if using allauth
Returns:
collection of friend objects fetched from facebook
]
if name[USING_ALLAUTH] begin[:]
variable[social_app] assign[=] call[name[SocialApp].objects.get_current, parameter[constant[facebook]]]
variable[oauth_token] assign[=] call[name[SocialToken].objects.get, parameter[]].token
variable[graph] assign[=] call[name[facebook].GraphAPI, parameter[name[oauth_token]]]
variable[friends] assign[=] call[name[graph].get_connections, parameter[constant[me], constant[friends]]]
if name[paginate] begin[:]
variable[total_friends] assign[=] call[name[friends].copy, parameter[]]
call[name[total_friends].pop, parameter[constant[paging]]]
while <ast.BoolOp object at 0x7da20c6e7d30> begin[:]
variable[next_url] assign[=] call[call[name[friends]][constant[paging]]][constant[next]]
variable[next_url_parsed] assign[=] call[name[urlparse].urlparse, parameter[name[next_url]]]
variable[query_data] assign[=] call[name[urlparse].parse_qs, parameter[name[next_url_parsed].query]]
call[name[query_data].pop, parameter[constant[access_token]]]
for taget[tuple[[<ast.Name object at 0x7da18f722ef0>, <ast.Name object at 0x7da18f721a50>]]] in starred[call[name[query_data].items, parameter[]]] begin[:]
call[name[query_data]][name[k]] assign[=] call[name[v]][constant[0]]
variable[friends] assign[=] call[name[graph].get_connections, parameter[constant[me], constant[friends]]]
call[name[total_friends]][constant[data]] assign[=] call[name[sum], parameter[list[[<ast.Subscript object at 0x7da18f7206d0>, <ast.Subscript object at 0x7da18f723610>]], list[[]]]]
return[name[total_friends]] | keyword[def] identifier[fetch_friends] ( identifier[self] , identifier[user] , identifier[paginate] = keyword[False] ):
literal[string]
keyword[if] identifier[USING_ALLAUTH] :
identifier[social_app] = identifier[SocialApp] . identifier[objects] . identifier[get_current] ( literal[string] )
identifier[oauth_token] = identifier[SocialToken] . identifier[objects] . identifier[get] ( identifier[account] = identifier[user] , identifier[app] = identifier[social_app] ). identifier[token]
keyword[else] :
identifier[social_auth_backend] = identifier[FacebookBackend] ()
identifier[tokens] = identifier[social_auth_backend] . identifier[tokens] ( identifier[user] )
identifier[oauth_token] = identifier[tokens] [ literal[string] ]
identifier[graph] = identifier[facebook] . identifier[GraphAPI] ( identifier[oauth_token] )
identifier[friends] = identifier[graph] . identifier[get_connections] ( literal[string] , literal[string] )
keyword[if] identifier[paginate] :
identifier[total_friends] = identifier[friends] . identifier[copy] ()
identifier[total_friends] . identifier[pop] ( literal[string] )
keyword[while] literal[string] keyword[in] identifier[friends] keyword[and] literal[string] keyword[in] identifier[friends] [ literal[string] ] keyword[and] identifier[friends] [ literal[string] ][ literal[string] ]:
identifier[next_url] = identifier[friends] [ literal[string] ][ literal[string] ]
identifier[next_url_parsed] = identifier[urlparse] . identifier[urlparse] ( identifier[next_url] )
identifier[query_data] = identifier[urlparse] . identifier[parse_qs] ( identifier[next_url_parsed] . identifier[query] )
identifier[query_data] . identifier[pop] ( literal[string] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[query_data] . identifier[items] ():
identifier[query_data] [ identifier[k] ]= identifier[v] [ literal[int] ]
identifier[friends] = identifier[graph] . identifier[get_connections] ( literal[string] , literal[string] ,** identifier[query_data] )
identifier[total_friends] [ literal[string] ]= identifier[sum] ([ identifier[total_friends] [ literal[string] ], identifier[friends] [ literal[string] ]],[])
keyword[else] :
identifier[total_friends] = identifier[friends]
keyword[return] identifier[total_friends] | def fetch_friends(self, user, paginate=False):
"""
fethces friends from facebook using the oauth_token
fethched by django-social-auth.
Note - user isn't a user - it's a UserSocialAuth if using social auth, or a SocialAccount if using allauth
Returns:
collection of friend objects fetched from facebook
"""
if USING_ALLAUTH:
social_app = SocialApp.objects.get_current('facebook')
oauth_token = SocialToken.objects.get(account=user, app=social_app).token # depends on [control=['if'], data=[]]
else:
social_auth_backend = FacebookBackend()
# Get the access_token
tokens = social_auth_backend.tokens(user)
oauth_token = tokens['access_token']
graph = facebook.GraphAPI(oauth_token)
friends = graph.get_connections('me', 'friends')
if paginate:
total_friends = friends.copy()
total_friends.pop('paging')
while 'paging' in friends and 'next' in friends['paging'] and friends['paging']['next']:
next_url = friends['paging']['next']
next_url_parsed = urlparse.urlparse(next_url)
query_data = urlparse.parse_qs(next_url_parsed.query)
query_data.pop('access_token')
for (k, v) in query_data.items():
query_data[k] = v[0] # depends on [control=['for'], data=[]]
friends = graph.get_connections('me', 'friends', **query_data)
total_friends['data'] = sum([total_friends['data'], friends['data']], []) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
total_friends = friends
return total_friends |
def has_submenu_items(self, current_page, allow_repeating_parents,
original_menu_tag, menu_instance=None, request=None):
"""
When rendering pages in a menu template a `has_children_in_menu`
attribute is added to each page, letting template developers know
whether or not the item has a submenu that must be rendered.
By default, we return a boolean indicating whether the page has
suitable child pages to include in such a menu. But, if you are
overriding the `modify_submenu_items` method to programatically add
items that aren't child pages, you'll likely need to alter this method
too, so the template knows there are sub items to be rendered.
"""
return menu_instance.page_has_children(self) | def function[has_submenu_items, parameter[self, current_page, allow_repeating_parents, original_menu_tag, menu_instance, request]]:
constant[
When rendering pages in a menu template a `has_children_in_menu`
attribute is added to each page, letting template developers know
whether or not the item has a submenu that must be rendered.
By default, we return a boolean indicating whether the page has
suitable child pages to include in such a menu. But, if you are
overriding the `modify_submenu_items` method to programatically add
items that aren't child pages, you'll likely need to alter this method
too, so the template knows there are sub items to be rendered.
]
return[call[name[menu_instance].page_has_children, parameter[name[self]]]] | keyword[def] identifier[has_submenu_items] ( identifier[self] , identifier[current_page] , identifier[allow_repeating_parents] ,
identifier[original_menu_tag] , identifier[menu_instance] = keyword[None] , identifier[request] = keyword[None] ):
literal[string]
keyword[return] identifier[menu_instance] . identifier[page_has_children] ( identifier[self] ) | def has_submenu_items(self, current_page, allow_repeating_parents, original_menu_tag, menu_instance=None, request=None):
"""
When rendering pages in a menu template a `has_children_in_menu`
attribute is added to each page, letting template developers know
whether or not the item has a submenu that must be rendered.
By default, we return a boolean indicating whether the page has
suitable child pages to include in such a menu. But, if you are
overriding the `modify_submenu_items` method to programatically add
items that aren't child pages, you'll likely need to alter this method
too, so the template knows there are sub items to be rendered.
"""
return menu_instance.page_has_children(self) |
def run(self, *args, **kwargs):
"""Iterate through all AWS accounts and apply roles and policies from Github
Args:
*args: Optional list of arguments
**kwargs: Optional list of keyword arguments
Returns:
`None`
"""
accounts = list(AWSAccount.get_all(include_disabled=False).values())
self.manage_policies(accounts) | def function[run, parameter[self]]:
constant[Iterate through all AWS accounts and apply roles and policies from Github
Args:
*args: Optional list of arguments
**kwargs: Optional list of keyword arguments
Returns:
`None`
]
variable[accounts] assign[=] call[name[list], parameter[call[call[name[AWSAccount].get_all, parameter[]].values, parameter[]]]]
call[name[self].manage_policies, parameter[name[accounts]]] | keyword[def] identifier[run] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[accounts] = identifier[list] ( identifier[AWSAccount] . identifier[get_all] ( identifier[include_disabled] = keyword[False] ). identifier[values] ())
identifier[self] . identifier[manage_policies] ( identifier[accounts] ) | def run(self, *args, **kwargs):
"""Iterate through all AWS accounts and apply roles and policies from Github
Args:
*args: Optional list of arguments
**kwargs: Optional list of keyword arguments
Returns:
`None`
"""
accounts = list(AWSAccount.get_all(include_disabled=False).values())
self.manage_policies(accounts) |
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
ssd.zplane(self.b,[1],auto_scale,size,tol) | def function[zplane, parameter[self, auto_scale, size, detect_mult, tol]]:
constant[
Plot the poles and zeros of the FIR filter in the z-plane
]
call[name[ssd].zplane, parameter[name[self].b, list[[<ast.Constant object at 0x7da18f721d50>]], name[auto_scale], name[size], name[tol]]] | keyword[def] identifier[zplane] ( identifier[self] , identifier[auto_scale] = keyword[True] , identifier[size] = literal[int] , identifier[detect_mult] = keyword[True] , identifier[tol] = literal[int] ):
literal[string]
identifier[ssd] . identifier[zplane] ( identifier[self] . identifier[b] ,[ literal[int] ], identifier[auto_scale] , identifier[size] , identifier[tol] ) | def zplane(self, auto_scale=True, size=2, detect_mult=True, tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
ssd.zplane(self.b, [1], auto_scale, size, tol) |
def update_appt(self, complex: str, house: str, price: str, square: str, id: str, **kwargs):
"""
Update existing appartment
"""
self.check_house(complex, house)
kwargs['price'] = self._format_decimal(price)
kwargs['square'] = self._format_decimal(square)
self.put('developers/{developer}/complexes/{complex}/houses/{house}/appts/{id}'.format(
developer=self.developer,
complex=complex,
house=house,
id=id,
price=self._format_decimal(price),
), data=kwargs) | def function[update_appt, parameter[self, complex, house, price, square, id]]:
constant[
Update existing appartment
]
call[name[self].check_house, parameter[name[complex], name[house]]]
call[name[kwargs]][constant[price]] assign[=] call[name[self]._format_decimal, parameter[name[price]]]
call[name[kwargs]][constant[square]] assign[=] call[name[self]._format_decimal, parameter[name[square]]]
call[name[self].put, parameter[call[constant[developers/{developer}/complexes/{complex}/houses/{house}/appts/{id}].format, parameter[]]]] | keyword[def] identifier[update_appt] ( identifier[self] , identifier[complex] : identifier[str] , identifier[house] : identifier[str] , identifier[price] : identifier[str] , identifier[square] : identifier[str] , identifier[id] : identifier[str] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[check_house] ( identifier[complex] , identifier[house] )
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[_format_decimal] ( identifier[price] )
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[_format_decimal] ( identifier[square] )
identifier[self] . identifier[put] ( literal[string] . identifier[format] (
identifier[developer] = identifier[self] . identifier[developer] ,
identifier[complex] = identifier[complex] ,
identifier[house] = identifier[house] ,
identifier[id] = identifier[id] ,
identifier[price] = identifier[self] . identifier[_format_decimal] ( identifier[price] ),
), identifier[data] = identifier[kwargs] ) | def update_appt(self, complex: str, house: str, price: str, square: str, id: str, **kwargs):
"""
Update existing appartment
"""
self.check_house(complex, house)
kwargs['price'] = self._format_decimal(price)
kwargs['square'] = self._format_decimal(square)
self.put('developers/{developer}/complexes/{complex}/houses/{house}/appts/{id}'.format(developer=self.developer, complex=complex, house=house, id=id, price=self._format_decimal(price)), data=kwargs) |
async def amerge(*agens) -> AsyncGenerator[Any, None]:
"""Thin wrapper around aiostream.stream.merge."""
xs = stream.merge(*agens)
async with xs.stream() as streamer:
async for x in streamer:
yield x | <ast.AsyncFunctionDef object at 0x7da1b0930490> | keyword[async] keyword[def] identifier[amerge] (* identifier[agens] )-> identifier[AsyncGenerator] [ identifier[Any] , keyword[None] ]:
literal[string]
identifier[xs] = identifier[stream] . identifier[merge] (* identifier[agens] )
keyword[async] keyword[with] identifier[xs] . identifier[stream] () keyword[as] identifier[streamer] :
keyword[async] keyword[for] identifier[x] keyword[in] identifier[streamer] :
keyword[yield] identifier[x] | async def amerge(*agens) -> AsyncGenerator[Any, None]:
"""Thin wrapper around aiostream.stream.merge."""
xs = stream.merge(*agens)
async with xs.stream() as streamer:
async for x in streamer:
yield x |
def get_code_breakpoint(self, dwProcessId, address):
"""
Returns the internally used breakpoint object,
for the code breakpoint defined at the given address.
@warning: It's usually best to call the L{Debug} methods
instead of accessing the breakpoint objects directly.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{enable_code_breakpoint},
L{enable_one_shot_code_breakpoint},
L{disable_code_breakpoint},
L{erase_code_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address where the breakpoint is defined.
@rtype: L{CodeBreakpoint}
@return: The code breakpoint object.
"""
key = (dwProcessId, address)
if key not in self.__codeBP:
msg = "No breakpoint at process %d, address %s"
address = HexDump.address(address)
raise KeyError(msg % (dwProcessId, address))
return self.__codeBP[key] | def function[get_code_breakpoint, parameter[self, dwProcessId, address]]:
constant[
Returns the internally used breakpoint object,
for the code breakpoint defined at the given address.
@warning: It's usually best to call the L{Debug} methods
instead of accessing the breakpoint objects directly.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{enable_code_breakpoint},
L{enable_one_shot_code_breakpoint},
L{disable_code_breakpoint},
L{erase_code_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address where the breakpoint is defined.
@rtype: L{CodeBreakpoint}
@return: The code breakpoint object.
]
variable[key] assign[=] tuple[[<ast.Name object at 0x7da18c4cfee0>, <ast.Name object at 0x7da18c4cd120>]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self].__codeBP] begin[:]
variable[msg] assign[=] constant[No breakpoint at process %d, address %s]
variable[address] assign[=] call[name[HexDump].address, parameter[name[address]]]
<ast.Raise object at 0x7da18c4cfbb0>
return[call[name[self].__codeBP][name[key]]] | keyword[def] identifier[get_code_breakpoint] ( identifier[self] , identifier[dwProcessId] , identifier[address] ):
literal[string]
identifier[key] =( identifier[dwProcessId] , identifier[address] )
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[__codeBP] :
identifier[msg] = literal[string]
identifier[address] = identifier[HexDump] . identifier[address] ( identifier[address] )
keyword[raise] identifier[KeyError] ( identifier[msg] %( identifier[dwProcessId] , identifier[address] ))
keyword[return] identifier[self] . identifier[__codeBP] [ identifier[key] ] | def get_code_breakpoint(self, dwProcessId, address):
"""
Returns the internally used breakpoint object,
for the code breakpoint defined at the given address.
@warning: It's usually best to call the L{Debug} methods
instead of accessing the breakpoint objects directly.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{enable_code_breakpoint},
L{enable_one_shot_code_breakpoint},
L{disable_code_breakpoint},
L{erase_code_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address where the breakpoint is defined.
@rtype: L{CodeBreakpoint}
@return: The code breakpoint object.
"""
key = (dwProcessId, address)
if key not in self.__codeBP:
msg = 'No breakpoint at process %d, address %s'
address = HexDump.address(address)
raise KeyError(msg % (dwProcessId, address)) # depends on [control=['if'], data=[]]
return self.__codeBP[key] |
def instruction_ST8(self, opcode, ea, register):
"""
Writes the contents of an 8-bit register into a memory location.
source code forms: STA P; STB P
CC bits "HNZVC": -aa0-
"""
value = register.value
# log.debug("$%x ST8 store value $%x from %s at $%x \t| %s" % (
# self.program_counter,
# value, register.name, ea,
# self.cfg.mem_info.get_shortest(ea)
# ))
self.clear_NZV()
self.update_NZ_8(value)
return ea, value | def function[instruction_ST8, parameter[self, opcode, ea, register]]:
constant[
Writes the contents of an 8-bit register into a memory location.
source code forms: STA P; STB P
CC bits "HNZVC": -aa0-
]
variable[value] assign[=] name[register].value
call[name[self].clear_NZV, parameter[]]
call[name[self].update_NZ_8, parameter[name[value]]]
return[tuple[[<ast.Name object at 0x7da18f00de10>, <ast.Name object at 0x7da18f00d240>]]] | keyword[def] identifier[instruction_ST8] ( identifier[self] , identifier[opcode] , identifier[ea] , identifier[register] ):
literal[string]
identifier[value] = identifier[register] . identifier[value]
identifier[self] . identifier[clear_NZV] ()
identifier[self] . identifier[update_NZ_8] ( identifier[value] )
keyword[return] identifier[ea] , identifier[value] | def instruction_ST8(self, opcode, ea, register):
"""
Writes the contents of an 8-bit register into a memory location.
source code forms: STA P; STB P
CC bits "HNZVC": -aa0-
"""
value = register.value
# log.debug("$%x ST8 store value $%x from %s at $%x \t| %s" % (
# self.program_counter,
# value, register.name, ea,
# self.cfg.mem_info.get_shortest(ea)
# ))
self.clear_NZV()
self.update_NZ_8(value)
return (ea, value) |
def run(self):
'''
Make the salt client call
'''
if self.opts['chunked']:
ret = self.run_chunked()
else:
ret = self.run_oldstyle()
salt.output.display_output(
ret,
self.opts.get('output', 'nested'),
self.opts) | def function[run, parameter[self]]:
constant[
Make the salt client call
]
if call[name[self].opts][constant[chunked]] begin[:]
variable[ret] assign[=] call[name[self].run_chunked, parameter[]]
call[name[salt].output.display_output, parameter[name[ret], call[name[self].opts.get, parameter[constant[output], constant[nested]]], name[self].opts]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[opts] [ literal[string] ]:
identifier[ret] = identifier[self] . identifier[run_chunked] ()
keyword[else] :
identifier[ret] = identifier[self] . identifier[run_oldstyle] ()
identifier[salt] . identifier[output] . identifier[display_output] (
identifier[ret] ,
identifier[self] . identifier[opts] . identifier[get] ( literal[string] , literal[string] ),
identifier[self] . identifier[opts] ) | def run(self):
"""
Make the salt client call
"""
if self.opts['chunked']:
ret = self.run_chunked() # depends on [control=['if'], data=[]]
else:
ret = self.run_oldstyle()
salt.output.display_output(ret, self.opts.get('output', 'nested'), self.opts) |
def getPluginActions(self, index):
"""Return actions from plug-in at `index`
Arguments:
index (int): Index at which item is located in model
"""
index = self.data["proxies"]["plugin"].mapToSource(
self.data["proxies"]["plugin"].index(
index, 0, QtCore.QModelIndex())).row()
item = self.data["models"]["item"].items[index]
# Inject reference to the original index
actions = [
dict(action, **{"index": index})
for action in item.actions
]
# Context specific actions
for action in list(actions):
if action["on"] == "failed" and not item.hasError:
actions.remove(action)
if action["on"] == "succeeded" and not item.succeeded:
actions.remove(action)
if action["on"] == "processed" and not item.processed:
actions.remove(action)
if action["on"] == "notProcessed" and item.processed:
actions.remove(action)
# Discard empty categories, separators
remaining_actions = list()
index = 0
try:
action = actions[index]
except IndexError:
pass
else:
while action:
try:
action = actions[index]
except IndexError:
break
isempty = False
if action["__type__"] in ("category", "separator"):
try:
next_ = actions[index + 1]
if next_["__type__"] != "action":
isempty = True
except IndexError:
isempty = True
if not isempty:
remaining_actions.append(action)
index += 1
return remaining_actions | def function[getPluginActions, parameter[self, index]]:
constant[Return actions from plug-in at `index`
Arguments:
index (int): Index at which item is located in model
]
variable[index] assign[=] call[call[call[call[name[self].data][constant[proxies]]][constant[plugin]].mapToSource, parameter[call[call[call[name[self].data][constant[proxies]]][constant[plugin]].index, parameter[name[index], constant[0], call[name[QtCore].QModelIndex, parameter[]]]]]].row, parameter[]]
variable[item] assign[=] call[call[call[name[self].data][constant[models]]][constant[item]].items][name[index]]
variable[actions] assign[=] <ast.ListComp object at 0x7da1b08b96f0>
for taget[name[action]] in starred[call[name[list], parameter[name[actions]]]] begin[:]
if <ast.BoolOp object at 0x7da1b08bae00> begin[:]
call[name[actions].remove, parameter[name[action]]]
if <ast.BoolOp object at 0x7da1b08bac50> begin[:]
call[name[actions].remove, parameter[name[action]]]
if <ast.BoolOp object at 0x7da1b08ba740> begin[:]
call[name[actions].remove, parameter[name[action]]]
if <ast.BoolOp object at 0x7da2045645b0> begin[:]
call[name[actions].remove, parameter[name[action]]]
variable[remaining_actions] assign[=] call[name[list], parameter[]]
variable[index] assign[=] constant[0]
<ast.Try object at 0x7da20e956050>
return[name[remaining_actions]] | keyword[def] identifier[getPluginActions] ( identifier[self] , identifier[index] ):
literal[string]
identifier[index] = identifier[self] . identifier[data] [ literal[string] ][ literal[string] ]. identifier[mapToSource] (
identifier[self] . identifier[data] [ literal[string] ][ literal[string] ]. identifier[index] (
identifier[index] , literal[int] , identifier[QtCore] . identifier[QModelIndex] ())). identifier[row] ()
identifier[item] = identifier[self] . identifier[data] [ literal[string] ][ literal[string] ]. identifier[items] [ identifier[index] ]
identifier[actions] =[
identifier[dict] ( identifier[action] ,**{ literal[string] : identifier[index] })
keyword[for] identifier[action] keyword[in] identifier[item] . identifier[actions]
]
keyword[for] identifier[action] keyword[in] identifier[list] ( identifier[actions] ):
keyword[if] identifier[action] [ literal[string] ]== literal[string] keyword[and] keyword[not] identifier[item] . identifier[hasError] :
identifier[actions] . identifier[remove] ( identifier[action] )
keyword[if] identifier[action] [ literal[string] ]== literal[string] keyword[and] keyword[not] identifier[item] . identifier[succeeded] :
identifier[actions] . identifier[remove] ( identifier[action] )
keyword[if] identifier[action] [ literal[string] ]== literal[string] keyword[and] keyword[not] identifier[item] . identifier[processed] :
identifier[actions] . identifier[remove] ( identifier[action] )
keyword[if] identifier[action] [ literal[string] ]== literal[string] keyword[and] identifier[item] . identifier[processed] :
identifier[actions] . identifier[remove] ( identifier[action] )
identifier[remaining_actions] = identifier[list] ()
identifier[index] = literal[int]
keyword[try] :
identifier[action] = identifier[actions] [ identifier[index] ]
keyword[except] identifier[IndexError] :
keyword[pass]
keyword[else] :
keyword[while] identifier[action] :
keyword[try] :
identifier[action] = identifier[actions] [ identifier[index] ]
keyword[except] identifier[IndexError] :
keyword[break]
identifier[isempty] = keyword[False]
keyword[if] identifier[action] [ literal[string] ] keyword[in] ( literal[string] , literal[string] ):
keyword[try] :
identifier[next_] = identifier[actions] [ identifier[index] + literal[int] ]
keyword[if] identifier[next_] [ literal[string] ]!= literal[string] :
identifier[isempty] = keyword[True]
keyword[except] identifier[IndexError] :
identifier[isempty] = keyword[True]
keyword[if] keyword[not] identifier[isempty] :
identifier[remaining_actions] . identifier[append] ( identifier[action] )
identifier[index] += literal[int]
keyword[return] identifier[remaining_actions] | def getPluginActions(self, index):
"""Return actions from plug-in at `index`
Arguments:
index (int): Index at which item is located in model
"""
index = self.data['proxies']['plugin'].mapToSource(self.data['proxies']['plugin'].index(index, 0, QtCore.QModelIndex())).row()
item = self.data['models']['item'].items[index]
# Inject reference to the original index
actions = [dict(action, **{'index': index}) for action in item.actions]
# Context specific actions
for action in list(actions):
if action['on'] == 'failed' and (not item.hasError):
actions.remove(action) # depends on [control=['if'], data=[]]
if action['on'] == 'succeeded' and (not item.succeeded):
actions.remove(action) # depends on [control=['if'], data=[]]
if action['on'] == 'processed' and (not item.processed):
actions.remove(action) # depends on [control=['if'], data=[]]
if action['on'] == 'notProcessed' and item.processed:
actions.remove(action) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['action']]
# Discard empty categories, separators
remaining_actions = list()
index = 0
try:
action = actions[index] # depends on [control=['try'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]]
else:
while action:
try:
action = actions[index] # depends on [control=['try'], data=[]]
except IndexError:
break # depends on [control=['except'], data=[]]
isempty = False
if action['__type__'] in ('category', 'separator'):
try:
next_ = actions[index + 1]
if next_['__type__'] != 'action':
isempty = True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IndexError:
isempty = True # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if not isempty:
remaining_actions.append(action) # depends on [control=['if'], data=[]]
index += 1 # depends on [control=['while'], data=[]]
return remaining_actions |
def action(method=None, **kwargs):
"""
Decorator that turns a function or controller method into an kervi action.
it is possible to call the action in other kervi processes or modules.
@action
def my_action(p)
...
call it via Actions["my_action"](10)
@action(action_id="action_1", name="This is my action")
def my_action(p)
...
call it via Actions["action_1"](10)
:Keyword Arguments:
* *action_id* (``str``) --
The action_id is the id you use when you call the action.
By default the action takes the name of function but you can override it with action_id.
* *name* (``str``) -- Name to show in UI if the action is linked to a panel.
"""
def action_wrap(f):
action_id = kwargs.get("action_id", f.__name__)
name = kwargs.get("name", action_id)
if not _is_method(f): # not "." in f.__qualname__:
action = Action(f, action_id, name)
Actions.add(action)
return action
else:
qual_name = getattr(f, "__qualname__", None)
owner_class = kwargs.get("controller_class", None)
if owner_class:
qual_name = owner_class + "." + f.__name__
if qual_name:
Actions.add_unbound(qual_name, action_id, name)
setattr(f, "set_interrupt", _SetInterrupt(action_id))
else:
print("using upython? if yes you need to pass the name of the controller class via the controller_class parameter.")
return f
if method:
return action_wrap(method)
else:
return action_wrap | def function[action, parameter[method]]:
constant[
Decorator that turns a function or controller method into an kervi action.
it is possible to call the action in other kervi processes or modules.
@action
def my_action(p)
...
call it via Actions["my_action"](10)
@action(action_id="action_1", name="This is my action")
def my_action(p)
...
call it via Actions["action_1"](10)
:Keyword Arguments:
* *action_id* (``str``) --
The action_id is the id you use when you call the action.
By default the action takes the name of function but you can override it with action_id.
* *name* (``str``) -- Name to show in UI if the action is linked to a panel.
]
def function[action_wrap, parameter[f]]:
variable[action_id] assign[=] call[name[kwargs].get, parameter[constant[action_id], name[f].__name__]]
variable[name] assign[=] call[name[kwargs].get, parameter[constant[name], name[action_id]]]
if <ast.UnaryOp object at 0x7da204345a50> begin[:]
variable[action] assign[=] call[name[Action], parameter[name[f], name[action_id], name[name]]]
call[name[Actions].add, parameter[name[action]]]
return[name[action]]
if name[method] begin[:]
return[call[name[action_wrap], parameter[name[method]]]] | keyword[def] identifier[action] ( identifier[method] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[action_wrap] ( identifier[f] ):
identifier[action_id] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[f] . identifier[__name__] )
identifier[name] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[action_id] )
keyword[if] keyword[not] identifier[_is_method] ( identifier[f] ):
identifier[action] = identifier[Action] ( identifier[f] , identifier[action_id] , identifier[name] )
identifier[Actions] . identifier[add] ( identifier[action] )
keyword[return] identifier[action]
keyword[else] :
identifier[qual_name] = identifier[getattr] ( identifier[f] , literal[string] , keyword[None] )
identifier[owner_class] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[owner_class] :
identifier[qual_name] = identifier[owner_class] + literal[string] + identifier[f] . identifier[__name__]
keyword[if] identifier[qual_name] :
identifier[Actions] . identifier[add_unbound] ( identifier[qual_name] , identifier[action_id] , identifier[name] )
identifier[setattr] ( identifier[f] , literal[string] , identifier[_SetInterrupt] ( identifier[action_id] ))
keyword[else] :
identifier[print] ( literal[string] )
keyword[return] identifier[f]
keyword[if] identifier[method] :
keyword[return] identifier[action_wrap] ( identifier[method] )
keyword[else] :
keyword[return] identifier[action_wrap] | def action(method=None, **kwargs):
"""
Decorator that turns a function or controller method into an kervi action.
it is possible to call the action in other kervi processes or modules.
@action
def my_action(p)
...
call it via Actions["my_action"](10)
@action(action_id="action_1", name="This is my action")
def my_action(p)
...
call it via Actions["action_1"](10)
:Keyword Arguments:
* *action_id* (``str``) --
The action_id is the id you use when you call the action.
By default the action takes the name of function but you can override it with action_id.
* *name* (``str``) -- Name to show in UI if the action is linked to a panel.
"""
def action_wrap(f):
action_id = kwargs.get('action_id', f.__name__)
name = kwargs.get('name', action_id)
if not _is_method(f): # not "." in f.__qualname__:
action = Action(f, action_id, name)
Actions.add(action)
return action # depends on [control=['if'], data=[]]
else:
qual_name = getattr(f, '__qualname__', None)
owner_class = kwargs.get('controller_class', None)
if owner_class:
qual_name = owner_class + '.' + f.__name__ # depends on [control=['if'], data=[]]
if qual_name:
Actions.add_unbound(qual_name, action_id, name)
setattr(f, 'set_interrupt', _SetInterrupt(action_id)) # depends on [control=['if'], data=[]]
else:
print('using upython? if yes you need to pass the name of the controller class via the controller_class parameter.')
return f
if method:
return action_wrap(method) # depends on [control=['if'], data=[]]
else:
return action_wrap |
def refinements(self):
"""Details for the visualization."""
self.rc("setattr a color gray @CENTROID")
self.rc("setattr a radius 0.3 @CENTROID")
self.rc("represent sphere @CENTROID")
self.rc("setattr a color orange @CHARGE")
self.rc("setattr a radius 0.4 @CHARGE")
self.rc("represent sphere @CHARGE")
self.rc("display :pseudoatoms") | def function[refinements, parameter[self]]:
constant[Details for the visualization.]
call[name[self].rc, parameter[constant[setattr a color gray @CENTROID]]]
call[name[self].rc, parameter[constant[setattr a radius 0.3 @CENTROID]]]
call[name[self].rc, parameter[constant[represent sphere @CENTROID]]]
call[name[self].rc, parameter[constant[setattr a color orange @CHARGE]]]
call[name[self].rc, parameter[constant[setattr a radius 0.4 @CHARGE]]]
call[name[self].rc, parameter[constant[represent sphere @CHARGE]]]
call[name[self].rc, parameter[constant[display :pseudoatoms]]] | keyword[def] identifier[refinements] ( identifier[self] ):
literal[string]
identifier[self] . identifier[rc] ( literal[string] )
identifier[self] . identifier[rc] ( literal[string] )
identifier[self] . identifier[rc] ( literal[string] )
identifier[self] . identifier[rc] ( literal[string] )
identifier[self] . identifier[rc] ( literal[string] )
identifier[self] . identifier[rc] ( literal[string] )
identifier[self] . identifier[rc] ( literal[string] ) | def refinements(self):
"""Details for the visualization."""
self.rc('setattr a color gray @CENTROID')
self.rc('setattr a radius 0.3 @CENTROID')
self.rc('represent sphere @CENTROID')
self.rc('setattr a color orange @CHARGE')
self.rc('setattr a radius 0.4 @CHARGE')
self.rc('represent sphere @CHARGE')
self.rc('display :pseudoatoms') |
def get_item(item, **kwargs):
"""
API versioning for each OpenStack service is independent. Generically capture
the public members (non-routine and non-private) of the OpenStack SDK objects.
Note the lack of the modify_output decorator. Preserving the field naming allows
us to reconstruct objects and orchestrate from stored items.
"""
_item = {}
for k,v in inspect.getmembers(item, lambda a:not(inspect.isroutine(a))):
if not k.startswith('_') and not k in ignore_list:
_item[k] = v
return sub_dict(_item) | def function[get_item, parameter[item]]:
constant[
API versioning for each OpenStack service is independent. Generically capture
the public members (non-routine and non-private) of the OpenStack SDK objects.
Note the lack of the modify_output decorator. Preserving the field naming allows
us to reconstruct objects and orchestrate from stored items.
]
variable[_item] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b01521a0>, <ast.Name object at 0x7da1b0153160>]]] in starred[call[name[inspect].getmembers, parameter[name[item], <ast.Lambda object at 0x7da1b0150d00>]]] begin[:]
if <ast.BoolOp object at 0x7da1b0153970> begin[:]
call[name[_item]][name[k]] assign[=] name[v]
return[call[name[sub_dict], parameter[name[_item]]]] | keyword[def] identifier[get_item] ( identifier[item] ,** identifier[kwargs] ):
literal[string]
identifier[_item] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[inspect] . identifier[getmembers] ( identifier[item] , keyword[lambda] identifier[a] : keyword[not] ( identifier[inspect] . identifier[isroutine] ( identifier[a] ))):
keyword[if] keyword[not] identifier[k] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[k] keyword[in] identifier[ignore_list] :
identifier[_item] [ identifier[k] ]= identifier[v]
keyword[return] identifier[sub_dict] ( identifier[_item] ) | def get_item(item, **kwargs):
"""
API versioning for each OpenStack service is independent. Generically capture
the public members (non-routine and non-private) of the OpenStack SDK objects.
Note the lack of the modify_output decorator. Preserving the field naming allows
us to reconstruct objects and orchestrate from stored items.
"""
_item = {}
for (k, v) in inspect.getmembers(item, lambda a: not inspect.isroutine(a)):
if not k.startswith('_') and (not k in ignore_list):
_item[k] = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return sub_dict(_item) |
def as_artist(self, origin=(0, 0), **kwargs):
"""
Matplotlib Text object for this region (`matplotlib.text.Text`).
Parameters
----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.text.Text` object accepts
Returns
-------
text : `~matplotlib.text.Text`
Matplotlib Text object.
"""
from matplotlib.text import Text
mpl_params = self.mpl_properties_default('text')
mpl_params.update(kwargs)
text = Text(self.center.x - origin[0], self.center.y - origin[1],
self.text, **mpl_params)
return text | def function[as_artist, parameter[self, origin]]:
constant[
Matplotlib Text object for this region (`matplotlib.text.Text`).
Parameters
----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.text.Text` object accepts
Returns
-------
text : `~matplotlib.text.Text`
Matplotlib Text object.
]
from relative_module[matplotlib.text] import module[Text]
variable[mpl_params] assign[=] call[name[self].mpl_properties_default, parameter[constant[text]]]
call[name[mpl_params].update, parameter[name[kwargs]]]
variable[text] assign[=] call[name[Text], parameter[binary_operation[name[self].center.x - call[name[origin]][constant[0]]], binary_operation[name[self].center.y - call[name[origin]][constant[1]]], name[self].text]]
return[name[text]] | keyword[def] identifier[as_artist] ( identifier[self] , identifier[origin] =( literal[int] , literal[int] ),** identifier[kwargs] ):
literal[string]
keyword[from] identifier[matplotlib] . identifier[text] keyword[import] identifier[Text]
identifier[mpl_params] = identifier[self] . identifier[mpl_properties_default] ( literal[string] )
identifier[mpl_params] . identifier[update] ( identifier[kwargs] )
identifier[text] = identifier[Text] ( identifier[self] . identifier[center] . identifier[x] - identifier[origin] [ literal[int] ], identifier[self] . identifier[center] . identifier[y] - identifier[origin] [ literal[int] ],
identifier[self] . identifier[text] ,** identifier[mpl_params] )
keyword[return] identifier[text] | def as_artist(self, origin=(0, 0), **kwargs):
"""
Matplotlib Text object for this region (`matplotlib.text.Text`).
Parameters
----------
origin : array_like, optional
The ``(x, y)`` pixel position of the origin of the displayed image.
Default is (0, 0).
kwargs : `dict`
All keywords that a `~matplotlib.text.Text` object accepts
Returns
-------
text : `~matplotlib.text.Text`
Matplotlib Text object.
"""
from matplotlib.text import Text
mpl_params = self.mpl_properties_default('text')
mpl_params.update(kwargs)
text = Text(self.center.x - origin[0], self.center.y - origin[1], self.text, **mpl_params)
return text |
def _parse_topic_path(topic_path):
"""Verify that a topic path is in the correct format.
.. _resource manager docs: https://cloud.google.com/resource-manager/\
reference/rest/v1beta1/projects#\
Project.FIELDS.project_id
.. _topic spec: https://cloud.google.com/storage/docs/json_api/v1/\
notifications/insert#topic
Expected to be of the form:
//pubsub.googleapis.com/projects/{project}/topics/{topic}
where the ``project`` value must be "6 to 30 lowercase letters, digits,
or hyphens. It must start with a letter. Trailing hyphens are prohibited."
(see `resource manager docs`_) and ``topic`` must have length at least two,
must start with a letter and may only contain alphanumeric characters or
``-``, ``_``, ``.``, ``~``, ``+`` or ``%`` (i.e characters used for URL
encoding, see `topic spec`_).
Args:
topic_path (str): The topic path to be verified.
Returns:
Tuple[str, str]: The ``project`` and ``topic`` parsed from the
``topic_path``.
Raises:
ValueError: If the topic path is invalid.
"""
match = _TOPIC_REF_RE.match(topic_path)
if match is None:
raise ValueError(_BAD_TOPIC.format(topic_path))
return match.group("name"), match.group("project") | def function[_parse_topic_path, parameter[topic_path]]:
constant[Verify that a topic path is in the correct format.
.. _resource manager docs: https://cloud.google.com/resource-manager/ reference/rest/v1beta1/projects# Project.FIELDS.project_id
.. _topic spec: https://cloud.google.com/storage/docs/json_api/v1/ notifications/insert#topic
Expected to be of the form:
//pubsub.googleapis.com/projects/{project}/topics/{topic}
where the ``project`` value must be "6 to 30 lowercase letters, digits,
or hyphens. It must start with a letter. Trailing hyphens are prohibited."
(see `resource manager docs`_) and ``topic`` must have length at least two,
must start with a letter and may only contain alphanumeric characters or
``-``, ``_``, ``.``, ``~``, ``+`` or ``%`` (i.e characters used for URL
encoding, see `topic spec`_).
Args:
topic_path (str): The topic path to be verified.
Returns:
Tuple[str, str]: The ``project`` and ``topic`` parsed from the
``topic_path``.
Raises:
ValueError: If the topic path is invalid.
]
variable[match] assign[=] call[name[_TOPIC_REF_RE].match, parameter[name[topic_path]]]
if compare[name[match] is constant[None]] begin[:]
<ast.Raise object at 0x7da204344790>
return[tuple[[<ast.Call object at 0x7da204347760>, <ast.Call object at 0x7da204344970>]]] | keyword[def] identifier[_parse_topic_path] ( identifier[topic_path] ):
literal[string]
identifier[match] = identifier[_TOPIC_REF_RE] . identifier[match] ( identifier[topic_path] )
keyword[if] identifier[match] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( identifier[_BAD_TOPIC] . identifier[format] ( identifier[topic_path] ))
keyword[return] identifier[match] . identifier[group] ( literal[string] ), identifier[match] . identifier[group] ( literal[string] ) | def _parse_topic_path(topic_path):
"""Verify that a topic path is in the correct format.
.. _resource manager docs: https://cloud.google.com/resource-manager/ reference/rest/v1beta1/projects# Project.FIELDS.project_id
.. _topic spec: https://cloud.google.com/storage/docs/json_api/v1/ notifications/insert#topic
Expected to be of the form:
//pubsub.googleapis.com/projects/{project}/topics/{topic}
where the ``project`` value must be "6 to 30 lowercase letters, digits,
or hyphens. It must start with a letter. Trailing hyphens are prohibited."
(see `resource manager docs`_) and ``topic`` must have length at least two,
must start with a letter and may only contain alphanumeric characters or
``-``, ``_``, ``.``, ``~``, ``+`` or ``%`` (i.e characters used for URL
encoding, see `topic spec`_).
Args:
topic_path (str): The topic path to be verified.
Returns:
Tuple[str, str]: The ``project`` and ``topic`` parsed from the
``topic_path``.
Raises:
ValueError: If the topic path is invalid.
"""
match = _TOPIC_REF_RE.match(topic_path)
if match is None:
raise ValueError(_BAD_TOPIC.format(topic_path)) # depends on [control=['if'], data=[]]
return (match.group('name'), match.group('project')) |
def get_default(self, *args, **kwargs):
"""Get the default parameters as defined in the Settings instance.
This function proceeds to seamlessly retrieve the argument to pass
through, depending on either it was overidden or not: If no argument
was overridden in a function of the toolbox, the default argument will
be set to ``None``, and this function will retrieve the default
parameters as defined by the ``cdt.SETTINGS`` 's attributes.
It has two modes of processing:
1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``.
2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once.
"""
def retrieve_param(i):
try:
return self.__getattribute__(i)
except AttributeError:
if i == "device":
return self.default_device
else:
return self.__getattribute__(i.upper())
if len(args) == 0:
if len(kwargs) == 1 and kwargs[list(kwargs.keys())[0]] is not None:
return kwargs[list(kwargs.keys())[0]]
elif len(kwargs) == 1:
return retrieve_param(list(kwargs.keys())[0])
else:
raise TypeError("As dict is unordered, it is impossible to give"
"the parameters in the correct order.")
else:
out = []
for i in args:
if i[1] is None:
out.append(retrieve_param(i[0]))
else:
out.append(i[1])
return out | def function[get_default, parameter[self]]:
constant[Get the default parameters as defined in the Settings instance.
This function proceeds to seamlessly retrieve the argument to pass
through, depending on either it was overidden or not: If no argument
was overridden in a function of the toolbox, the default argument will
be set to ``None``, and this function will retrieve the default
parameters as defined by the ``cdt.SETTINGS`` 's attributes.
It has two modes of processing:
1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``.
2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once.
]
def function[retrieve_param, parameter[i]]:
<ast.Try object at 0x7da20e9540d0>
if compare[call[name[len], parameter[name[args]]] equal[==] constant[0]] begin[:]
if <ast.BoolOp object at 0x7da20e9561d0> begin[:]
return[call[name[kwargs]][call[call[name[list], parameter[call[name[kwargs].keys, parameter[]]]]][constant[0]]]] | keyword[def] identifier[get_default] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[retrieve_param] ( identifier[i] ):
keyword[try] :
keyword[return] identifier[self] . identifier[__getattribute__] ( identifier[i] )
keyword[except] identifier[AttributeError] :
keyword[if] identifier[i] == literal[string] :
keyword[return] identifier[self] . identifier[default_device]
keyword[else] :
keyword[return] identifier[self] . identifier[__getattribute__] ( identifier[i] . identifier[upper] ())
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
keyword[if] identifier[len] ( identifier[kwargs] )== literal[int] keyword[and] identifier[kwargs] [ identifier[list] ( identifier[kwargs] . identifier[keys] ())[ literal[int] ]] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[kwargs] [ identifier[list] ( identifier[kwargs] . identifier[keys] ())[ literal[int] ]]
keyword[elif] identifier[len] ( identifier[kwargs] )== literal[int] :
keyword[return] identifier[retrieve_param] ( identifier[list] ( identifier[kwargs] . identifier[keys] ())[ literal[int] ])
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
keyword[else] :
identifier[out] =[]
keyword[for] identifier[i] keyword[in] identifier[args] :
keyword[if] identifier[i] [ literal[int] ] keyword[is] keyword[None] :
identifier[out] . identifier[append] ( identifier[retrieve_param] ( identifier[i] [ literal[int] ]))
keyword[else] :
identifier[out] . identifier[append] ( identifier[i] [ literal[int] ])
keyword[return] identifier[out] | def get_default(self, *args, **kwargs):
"""Get the default parameters as defined in the Settings instance.
This function proceeds to seamlessly retrieve the argument to pass
through, depending on either it was overidden or not: If no argument
was overridden in a function of the toolbox, the default argument will
be set to ``None``, and this function will retrieve the default
parameters as defined by the ``cdt.SETTINGS`` 's attributes.
It has two modes of processing:
1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``.
2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once.
"""
def retrieve_param(i):
try:
return self.__getattribute__(i) # depends on [control=['try'], data=[]]
except AttributeError:
if i == 'device':
return self.default_device # depends on [control=['if'], data=[]]
else:
return self.__getattribute__(i.upper()) # depends on [control=['except'], data=[]]
if len(args) == 0:
if len(kwargs) == 1 and kwargs[list(kwargs.keys())[0]] is not None:
return kwargs[list(kwargs.keys())[0]] # depends on [control=['if'], data=[]]
elif len(kwargs) == 1:
return retrieve_param(list(kwargs.keys())[0]) # depends on [control=['if'], data=[]]
else:
raise TypeError('As dict is unordered, it is impossible to givethe parameters in the correct order.') # depends on [control=['if'], data=[]]
else:
out = []
for i in args:
if i[1] is None:
out.append(retrieve_param(i[0])) # depends on [control=['if'], data=[]]
else:
out.append(i[1]) # depends on [control=['for'], data=['i']]
return out |
def get_changeset(changeset):
"""Get the changeset using the OSM API and return the content as a XML
ElementTree.
Args:
changeset: the id of the changeset.
"""
url = 'https://www.openstreetmap.org/api/0.6/changeset/{}/download'.format(
changeset
)
return ET.fromstring(requests.get(url).content) | def function[get_changeset, parameter[changeset]]:
constant[Get the changeset using the OSM API and return the content as a XML
ElementTree.
Args:
changeset: the id of the changeset.
]
variable[url] assign[=] call[constant[https://www.openstreetmap.org/api/0.6/changeset/{}/download].format, parameter[name[changeset]]]
return[call[name[ET].fromstring, parameter[call[name[requests].get, parameter[name[url]]].content]]] | keyword[def] identifier[get_changeset] ( identifier[changeset] ):
literal[string]
identifier[url] = literal[string] . identifier[format] (
identifier[changeset]
)
keyword[return] identifier[ET] . identifier[fromstring] ( identifier[requests] . identifier[get] ( identifier[url] ). identifier[content] ) | def get_changeset(changeset):
"""Get the changeset using the OSM API and return the content as a XML
ElementTree.
Args:
changeset: the id of the changeset.
"""
url = 'https://www.openstreetmap.org/api/0.6/changeset/{}/download'.format(changeset)
return ET.fromstring(requests.get(url).content) |
def predecessors(self, node, exclude_compressed=True):
"""
Returns the list of predecessors of a given node
Parameters
----------
node : str
The target node
exclude_compressed : boolean
If true, compressed nodes are excluded from the predecessors list
Returns
-------
list
List of predecessors nodes
"""
preds = super(Graph, self).predecessors(node)
if exclude_compressed:
return [n for n in preds if not self.node[n].get('compressed', False)]
else:
return preds | def function[predecessors, parameter[self, node, exclude_compressed]]:
constant[
Returns the list of predecessors of a given node
Parameters
----------
node : str
The target node
exclude_compressed : boolean
If true, compressed nodes are excluded from the predecessors list
Returns
-------
list
List of predecessors nodes
]
variable[preds] assign[=] call[call[name[super], parameter[name[Graph], name[self]]].predecessors, parameter[name[node]]]
if name[exclude_compressed] begin[:]
return[<ast.ListComp object at 0x7da1b0b39480>] | keyword[def] identifier[predecessors] ( identifier[self] , identifier[node] , identifier[exclude_compressed] = keyword[True] ):
literal[string]
identifier[preds] = identifier[super] ( identifier[Graph] , identifier[self] ). identifier[predecessors] ( identifier[node] )
keyword[if] identifier[exclude_compressed] :
keyword[return] [ identifier[n] keyword[for] identifier[n] keyword[in] identifier[preds] keyword[if] keyword[not] identifier[self] . identifier[node] [ identifier[n] ]. identifier[get] ( literal[string] , keyword[False] )]
keyword[else] :
keyword[return] identifier[preds] | def predecessors(self, node, exclude_compressed=True):
"""
Returns the list of predecessors of a given node
Parameters
----------
node : str
The target node
exclude_compressed : boolean
If true, compressed nodes are excluded from the predecessors list
Returns
-------
list
List of predecessors nodes
"""
preds = super(Graph, self).predecessors(node)
if exclude_compressed:
return [n for n in preds if not self.node[n].get('compressed', False)] # depends on [control=['if'], data=[]]
else:
return preds |
def grouper(n, iterable):
"""Itertools recipe
>>> list(grouper(3, iter('ABCDEFG')))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
"""
return (iterable[i:i + n] for i in range(0, len(iterable), n)) | def function[grouper, parameter[n, iterable]]:
constant[Itertools recipe
>>> list(grouper(3, iter('ABCDEFG')))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
]
return[<ast.GeneratorExp object at 0x7da1b1f4abc0>] | keyword[def] identifier[grouper] ( identifier[n] , identifier[iterable] ):
literal[string]
keyword[return] ( identifier[iterable] [ identifier[i] : identifier[i] + identifier[n] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[iterable] ), identifier[n] )) | def grouper(n, iterable):
"""Itertools recipe
>>> list(grouper(3, iter('ABCDEFG')))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
"""
return (iterable[i:i + n] for i in range(0, len(iterable), n)) |
def _clean(self):
"""Remove references of inactive workers periodically."""
if self._workers:
for w in self._workers:
if w.is_finished():
self._workers.remove(w)
else:
self._current_worker = None
self._timer.stop() | def function[_clean, parameter[self]]:
constant[Remove references of inactive workers periodically.]
if name[self]._workers begin[:]
for taget[name[w]] in starred[name[self]._workers] begin[:]
if call[name[w].is_finished, parameter[]] begin[:]
call[name[self]._workers.remove, parameter[name[w]]] | keyword[def] identifier[_clean] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_workers] :
keyword[for] identifier[w] keyword[in] identifier[self] . identifier[_workers] :
keyword[if] identifier[w] . identifier[is_finished] ():
identifier[self] . identifier[_workers] . identifier[remove] ( identifier[w] )
keyword[else] :
identifier[self] . identifier[_current_worker] = keyword[None]
identifier[self] . identifier[_timer] . identifier[stop] () | def _clean(self):
"""Remove references of inactive workers periodically."""
if self._workers:
for w in self._workers:
if w.is_finished():
self._workers.remove(w) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['w']] # depends on [control=['if'], data=[]]
else:
self._current_worker = None
self._timer.stop() |
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(_hval(value)) | def function[add_header, parameter[self, name, value]]:
constant[ Add an additional response header, not removing duplicates. ]
call[call[name[self]._headers.setdefault, parameter[call[name[_hkey], parameter[name[name]]], list[[]]]].append, parameter[call[name[_hval], parameter[name[value]]]]] | keyword[def] identifier[add_header] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
identifier[self] . identifier[_headers] . identifier[setdefault] ( identifier[_hkey] ( identifier[name] ),[]). identifier[append] ( identifier[_hval] ( identifier[value] )) | def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(_hval(value)) |
def addToPrePrepares(self, pp: PrePrepare) -> None:
"""
Add the specified PRE-PREPARE to this replica's list of received
PRE-PREPAREs and try sending PREPARE
:param pp: the PRE-PREPARE to add to the list
"""
key = (pp.viewNo, pp.ppSeqNo)
self.prePrepares[key] = pp
self.lastPrePrepareSeqNo = pp.ppSeqNo
self.last_accepted_pre_prepare_time = pp.ppTime
self.dequeue_prepares(*key)
self.dequeue_commits(*key)
self.stats.inc(TPCStat.PrePrepareRcvd)
self.tryPrepare(pp) | def function[addToPrePrepares, parameter[self, pp]]:
constant[
Add the specified PRE-PREPARE to this replica's list of received
PRE-PREPAREs and try sending PREPARE
:param pp: the PRE-PREPARE to add to the list
]
variable[key] assign[=] tuple[[<ast.Attribute object at 0x7da1b1736020>, <ast.Attribute object at 0x7da1b1735780>]]
call[name[self].prePrepares][name[key]] assign[=] name[pp]
name[self].lastPrePrepareSeqNo assign[=] name[pp].ppSeqNo
name[self].last_accepted_pre_prepare_time assign[=] name[pp].ppTime
call[name[self].dequeue_prepares, parameter[<ast.Starred object at 0x7da1b1737910>]]
call[name[self].dequeue_commits, parameter[<ast.Starred object at 0x7da1b1735cc0>]]
call[name[self].stats.inc, parameter[name[TPCStat].PrePrepareRcvd]]
call[name[self].tryPrepare, parameter[name[pp]]] | keyword[def] identifier[addToPrePrepares] ( identifier[self] , identifier[pp] : identifier[PrePrepare] )-> keyword[None] :
literal[string]
identifier[key] =( identifier[pp] . identifier[viewNo] , identifier[pp] . identifier[ppSeqNo] )
identifier[self] . identifier[prePrepares] [ identifier[key] ]= identifier[pp]
identifier[self] . identifier[lastPrePrepareSeqNo] = identifier[pp] . identifier[ppSeqNo]
identifier[self] . identifier[last_accepted_pre_prepare_time] = identifier[pp] . identifier[ppTime]
identifier[self] . identifier[dequeue_prepares] (* identifier[key] )
identifier[self] . identifier[dequeue_commits] (* identifier[key] )
identifier[self] . identifier[stats] . identifier[inc] ( identifier[TPCStat] . identifier[PrePrepareRcvd] )
identifier[self] . identifier[tryPrepare] ( identifier[pp] ) | def addToPrePrepares(self, pp: PrePrepare) -> None:
"""
Add the specified PRE-PREPARE to this replica's list of received
PRE-PREPAREs and try sending PREPARE
:param pp: the PRE-PREPARE to add to the list
"""
key = (pp.viewNo, pp.ppSeqNo)
self.prePrepares[key] = pp
self.lastPrePrepareSeqNo = pp.ppSeqNo
self.last_accepted_pre_prepare_time = pp.ppTime
self.dequeue_prepares(*key)
self.dequeue_commits(*key)
self.stats.inc(TPCStat.PrePrepareRcvd)
self.tryPrepare(pp) |
def _see_remote_link_node(self, node_id, fringe=None, dist=None,
check_dsp=lambda x: True):
"""
See data remote links of the node (set output to remote links).
:param node_id:
Node id.
:type node_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param dist:
Distance from the starting node.
:type dist: float, int
:param check_dsp:
A function to check if the remote dispatcher is ok.
:type check_dsp: (Dispatcher) -> bool
"""
# Namespace shortcut.
node, p_id, c_i = self.nodes[node_id], self.index[:-1], self.index[-1:]
if node['type'] == 'data' and p_id and check_dsp(p_id):
sol = self.sub_sol[self.index[:-1]] # Get parent solution.
for dsp_id, n in sol.dsp.nodes.items():
if n['index'] == c_i and node_id in n.get('outputs', {}):
value = self[node_id] # Get data output.
for n_id in stlp(n['outputs'][node_id]):
# Node has been visited or inp do not coincide with out.
if not (n_id in sol._visited or
sol.workflow.has_edge(n_id, dsp_id)):
# Donate the result to the child.
sol._wf_add_edge(dsp_id, n_id, value=value)
if fringe is not None:
# See node.
sol._see_node(n_id, fringe, dist, w_wait_in=2)
break | def function[_see_remote_link_node, parameter[self, node_id, fringe, dist, check_dsp]]:
constant[
See data remote links of the node (set output to remote links).
:param node_id:
Node id.
:type node_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param dist:
Distance from the starting node.
:type dist: float, int
:param check_dsp:
A function to check if the remote dispatcher is ok.
:type check_dsp: (Dispatcher) -> bool
]
<ast.Tuple object at 0x7da18f09c4f0> assign[=] tuple[[<ast.Subscript object at 0x7da18f09c400>, <ast.Subscript object at 0x7da18f09fa60>, <ast.Subscript object at 0x7da18f09eb30>]]
if <ast.BoolOp object at 0x7da18f09c6d0> begin[:]
variable[sol] assign[=] call[name[self].sub_sol][call[name[self].index][<ast.Slice object at 0x7da18f09dd50>]]
for taget[tuple[[<ast.Name object at 0x7da18f09ed70>, <ast.Name object at 0x7da18f09d330>]]] in starred[call[name[sol].dsp.nodes.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18f09cc40> begin[:]
variable[value] assign[=] call[name[self]][name[node_id]]
for taget[name[n_id]] in starred[call[name[stlp], parameter[call[call[name[n]][constant[outputs]]][name[node_id]]]]] begin[:]
if <ast.UnaryOp object at 0x7da20c6e6b60> begin[:]
call[name[sol]._wf_add_edge, parameter[name[dsp_id], name[n_id]]]
if compare[name[fringe] is_not constant[None]] begin[:]
call[name[sol]._see_node, parameter[name[n_id], name[fringe], name[dist]]]
break | keyword[def] identifier[_see_remote_link_node] ( identifier[self] , identifier[node_id] , identifier[fringe] = keyword[None] , identifier[dist] = keyword[None] ,
identifier[check_dsp] = keyword[lambda] identifier[x] : keyword[True] ):
literal[string]
identifier[node] , identifier[p_id] , identifier[c_i] = identifier[self] . identifier[nodes] [ identifier[node_id] ], identifier[self] . identifier[index] [:- literal[int] ], identifier[self] . identifier[index] [- literal[int] :]
keyword[if] identifier[node] [ literal[string] ]== literal[string] keyword[and] identifier[p_id] keyword[and] identifier[check_dsp] ( identifier[p_id] ):
identifier[sol] = identifier[self] . identifier[sub_sol] [ identifier[self] . identifier[index] [:- literal[int] ]]
keyword[for] identifier[dsp_id] , identifier[n] keyword[in] identifier[sol] . identifier[dsp] . identifier[nodes] . identifier[items] ():
keyword[if] identifier[n] [ literal[string] ]== identifier[c_i] keyword[and] identifier[node_id] keyword[in] identifier[n] . identifier[get] ( literal[string] ,{}):
identifier[value] = identifier[self] [ identifier[node_id] ]
keyword[for] identifier[n_id] keyword[in] identifier[stlp] ( identifier[n] [ literal[string] ][ identifier[node_id] ]):
keyword[if] keyword[not] ( identifier[n_id] keyword[in] identifier[sol] . identifier[_visited] keyword[or]
identifier[sol] . identifier[workflow] . identifier[has_edge] ( identifier[n_id] , identifier[dsp_id] )):
identifier[sol] . identifier[_wf_add_edge] ( identifier[dsp_id] , identifier[n_id] , identifier[value] = identifier[value] )
keyword[if] identifier[fringe] keyword[is] keyword[not] keyword[None] :
identifier[sol] . identifier[_see_node] ( identifier[n_id] , identifier[fringe] , identifier[dist] , identifier[w_wait_in] = literal[int] )
keyword[break] | def _see_remote_link_node(self, node_id, fringe=None, dist=None, check_dsp=lambda x: True):
"""
See data remote links of the node (set output to remote links).
:param node_id:
Node id.
:type node_id: str
:param fringe:
Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher)]
:param dist:
Distance from the starting node.
:type dist: float, int
:param check_dsp:
A function to check if the remote dispatcher is ok.
:type check_dsp: (Dispatcher) -> bool
"""
# Namespace shortcut.
(node, p_id, c_i) = (self.nodes[node_id], self.index[:-1], self.index[-1:])
if node['type'] == 'data' and p_id and check_dsp(p_id):
sol = self.sub_sol[self.index[:-1]] # Get parent solution.
for (dsp_id, n) in sol.dsp.nodes.items():
if n['index'] == c_i and node_id in n.get('outputs', {}):
value = self[node_id] # Get data output.
for n_id in stlp(n['outputs'][node_id]):
# Node has been visited or inp do not coincide with out.
if not (n_id in sol._visited or sol.workflow.has_edge(n_id, dsp_id)):
# Donate the result to the child.
sol._wf_add_edge(dsp_id, n_id, value=value)
if fringe is not None:
# See node.
sol._see_node(n_id, fringe, dist, w_wait_in=2) # depends on [control=['if'], data=['fringe']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n_id']]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are timeseries-profile-orthogonal feature types')
message = '{} must be a valid profile-orthogonal feature type. It must have dimensions of (station, time, z).'
message += ' If it\'s a single station, it must have dimensions (time, z). x and y dimensions must be scalar or have'
message += ' dimensions (station). time must be a coordinate variable with dimension (time) and z must be a'
message += ' coordinate variabel with dimension (z).'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_timeseries_profile_single_station(dataset, variable)
is_valid = is_valid or util.is_timeseries_profile_multi_station(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
results.append(required_ctx.to_result())
return results | def function[check_dimensions, parameter[self, dataset]]:
constant[
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
]
variable[results] assign[=] list[[]]
variable[required_ctx] assign[=] call[name[TestCtx], parameter[name[BaseCheck].HIGH, constant[All geophysical variables are timeseries-profile-orthogonal feature types]]]
variable[message] assign[=] constant[{} must be a valid profile-orthogonal feature type. It must have dimensions of (station, time, z).]
<ast.AugAssign object at 0x7da1b26af550>
<ast.AugAssign object at 0x7da1b26af610>
<ast.AugAssign object at 0x7da1b26af850>
for taget[name[variable]] in starred[call[name[util].get_geophysical_variables, parameter[name[dataset]]]] begin[:]
variable[is_valid] assign[=] call[name[util].is_timeseries_profile_single_station, parameter[name[dataset], name[variable]]]
variable[is_valid] assign[=] <ast.BoolOp object at 0x7da18dc07c10>
call[name[required_ctx].assert_true, parameter[name[is_valid], call[name[message].format, parameter[name[variable]]]]]
call[name[results].append, parameter[call[name[required_ctx].to_result, parameter[]]]]
return[name[results]] | keyword[def] identifier[check_dimensions] ( identifier[self] , identifier[dataset] ):
literal[string]
identifier[results] =[]
identifier[required_ctx] = identifier[TestCtx] ( identifier[BaseCheck] . identifier[HIGH] , literal[string] )
identifier[message] = literal[string]
identifier[message] += literal[string]
identifier[message] += literal[string]
identifier[message] += literal[string]
keyword[for] identifier[variable] keyword[in] identifier[util] . identifier[get_geophysical_variables] ( identifier[dataset] ):
identifier[is_valid] = identifier[util] . identifier[is_timeseries_profile_single_station] ( identifier[dataset] , identifier[variable] )
identifier[is_valid] = identifier[is_valid] keyword[or] identifier[util] . identifier[is_timeseries_profile_multi_station] ( identifier[dataset] , identifier[variable] )
identifier[required_ctx] . identifier[assert_true] (
identifier[is_valid] ,
identifier[message] . identifier[format] ( identifier[variable] )
)
identifier[results] . identifier[append] ( identifier[required_ctx] . identifier[to_result] ())
keyword[return] identifier[results] | def check_dimensions(self, dataset):
"""
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
"""
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are timeseries-profile-orthogonal feature types')
message = '{} must be a valid profile-orthogonal feature type. It must have dimensions of (station, time, z).'
message += " If it's a single station, it must have dimensions (time, z). x and y dimensions must be scalar or have"
message += ' dimensions (station). time must be a coordinate variable with dimension (time) and z must be a'
message += ' coordinate variabel with dimension (z).'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_timeseries_profile_single_station(dataset, variable)
is_valid = is_valid or util.is_timeseries_profile_multi_station(dataset, variable)
required_ctx.assert_true(is_valid, message.format(variable)) # depends on [control=['for'], data=['variable']]
results.append(required_ctx.to_result())
return results |
def rename_next_state_fluent(name: str) -> str:
'''Returns next state fluent canonical name.
Args:
name (str): The current state fluent name.
Returns:
str: The next state fluent name.
'''
i = name.index('/')
functor = name[:i-1]
arity = name[i+1:]
return "{}/{}".format(functor, arity) | def function[rename_next_state_fluent, parameter[name]]:
constant[Returns next state fluent canonical name.
Args:
name (str): The current state fluent name.
Returns:
str: The next state fluent name.
]
variable[i] assign[=] call[name[name].index, parameter[constant[/]]]
variable[functor] assign[=] call[name[name]][<ast.Slice object at 0x7da1b0a7a860>]
variable[arity] assign[=] call[name[name]][<ast.Slice object at 0x7da1b0ae0eb0>]
return[call[constant[{}/{}].format, parameter[name[functor], name[arity]]]] | keyword[def] identifier[rename_next_state_fluent] ( identifier[name] : identifier[str] )-> identifier[str] :
literal[string]
identifier[i] = identifier[name] . identifier[index] ( literal[string] )
identifier[functor] = identifier[name] [: identifier[i] - literal[int] ]
identifier[arity] = identifier[name] [ identifier[i] + literal[int] :]
keyword[return] literal[string] . identifier[format] ( identifier[functor] , identifier[arity] ) | def rename_next_state_fluent(name: str) -> str:
"""Returns next state fluent canonical name.
Args:
name (str): The current state fluent name.
Returns:
str: The next state fluent name.
"""
i = name.index('/')
functor = name[:i - 1]
arity = name[i + 1:]
return '{}/{}'.format(functor, arity) |
def ext_pillar(minion_id,
pillar,
*args,
**kwargs):
'''
Execute queries against POSTGRES, merge and return as a dict
'''
return POSTGRESExtPillar().fetch(minion_id, pillar, *args, **kwargs) | def function[ext_pillar, parameter[minion_id, pillar]]:
constant[
Execute queries against POSTGRES, merge and return as a dict
]
return[call[call[name[POSTGRESExtPillar], parameter[]].fetch, parameter[name[minion_id], name[pillar], <ast.Starred object at 0x7da18dc99c60>]]] | keyword[def] identifier[ext_pillar] ( identifier[minion_id] ,
identifier[pillar] ,
* identifier[args] ,
** identifier[kwargs] ):
literal[string]
keyword[return] identifier[POSTGRESExtPillar] (). identifier[fetch] ( identifier[minion_id] , identifier[pillar] ,* identifier[args] ,** identifier[kwargs] ) | def ext_pillar(minion_id, pillar, *args, **kwargs):
"""
Execute queries against POSTGRES, merge and return as a dict
"""
return POSTGRESExtPillar().fetch(minion_id, pillar, *args, **kwargs) |
def _check_transition_validity(self, check_transition):
""" Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState.
Start transitions are forbidden in the ConcurrencyState.
:param check_transition: the transition to check for validity
:return:
"""
valid, message = super(BarrierConcurrencyState, self)._check_transition_validity(check_transition)
if not valid:
return False, message
# Only the following transitions are allowed in barrier concurrency states:
# - Transitions from the decider state to the parent state\n"
# - Transitions from not-decider states to the decider state\n"
# - Transitions from not_decider states from aborted/preempted outcomes to the
# aborted/preempted outcome of the parent
from_state_id = check_transition.from_state
to_state_id = check_transition.to_state
from_outcome_id = check_transition.from_outcome
to_outcome_id = check_transition.to_outcome
if from_state_id == UNIQUE_DECIDER_STATE_ID:
if to_state_id != self.state_id:
return False, "Transition from the decider state must go to the parent state"
else:
if to_state_id != UNIQUE_DECIDER_STATE_ID:
if from_outcome_id not in [-2, -1] or to_outcome_id not in [-2, -1]:
return False, "Transition from this state must go to the decider state. The only exception are " \
"transition from aborted/preempted to the parent aborted/preempted outcomes"
return True, message | def function[_check_transition_validity, parameter[self, check_transition]]:
constant[ Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState.
Start transitions are forbidden in the ConcurrencyState.
:param check_transition: the transition to check for validity
:return:
]
<ast.Tuple object at 0x7da18eb552d0> assign[=] call[call[name[super], parameter[name[BarrierConcurrencyState], name[self]]]._check_transition_validity, parameter[name[check_transition]]]
if <ast.UnaryOp object at 0x7da18eb55300> begin[:]
return[tuple[[<ast.Constant object at 0x7da18eb56170>, <ast.Name object at 0x7da18eb54370>]]]
variable[from_state_id] assign[=] name[check_transition].from_state
variable[to_state_id] assign[=] name[check_transition].to_state
variable[from_outcome_id] assign[=] name[check_transition].from_outcome
variable[to_outcome_id] assign[=] name[check_transition].to_outcome
if compare[name[from_state_id] equal[==] name[UNIQUE_DECIDER_STATE_ID]] begin[:]
if compare[name[to_state_id] not_equal[!=] name[self].state_id] begin[:]
return[tuple[[<ast.Constant object at 0x7da18eb57040>, <ast.Constant object at 0x7da18eb54a30>]]]
return[tuple[[<ast.Constant object at 0x7da18eb54ac0>, <ast.Name object at 0x7da18eb57460>]]] | keyword[def] identifier[_check_transition_validity] ( identifier[self] , identifier[check_transition] ):
literal[string]
identifier[valid] , identifier[message] = identifier[super] ( identifier[BarrierConcurrencyState] , identifier[self] ). identifier[_check_transition_validity] ( identifier[check_transition] )
keyword[if] keyword[not] identifier[valid] :
keyword[return] keyword[False] , identifier[message]
identifier[from_state_id] = identifier[check_transition] . identifier[from_state]
identifier[to_state_id] = identifier[check_transition] . identifier[to_state]
identifier[from_outcome_id] = identifier[check_transition] . identifier[from_outcome]
identifier[to_outcome_id] = identifier[check_transition] . identifier[to_outcome]
keyword[if] identifier[from_state_id] == identifier[UNIQUE_DECIDER_STATE_ID] :
keyword[if] identifier[to_state_id] != identifier[self] . identifier[state_id] :
keyword[return] keyword[False] , literal[string]
keyword[else] :
keyword[if] identifier[to_state_id] != identifier[UNIQUE_DECIDER_STATE_ID] :
keyword[if] identifier[from_outcome_id] keyword[not] keyword[in] [- literal[int] ,- literal[int] ] keyword[or] identifier[to_outcome_id] keyword[not] keyword[in] [- literal[int] ,- literal[int] ]:
keyword[return] keyword[False] , literal[string] literal[string]
keyword[return] keyword[True] , identifier[message] | def _check_transition_validity(self, check_transition):
""" Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState.
Start transitions are forbidden in the ConcurrencyState.
:param check_transition: the transition to check for validity
:return:
"""
(valid, message) = super(BarrierConcurrencyState, self)._check_transition_validity(check_transition)
if not valid:
return (False, message) # depends on [control=['if'], data=[]]
# Only the following transitions are allowed in barrier concurrency states:
# - Transitions from the decider state to the parent state\n"
# - Transitions from not-decider states to the decider state\n"
# - Transitions from not_decider states from aborted/preempted outcomes to the
# aborted/preempted outcome of the parent
from_state_id = check_transition.from_state
to_state_id = check_transition.to_state
from_outcome_id = check_transition.from_outcome
to_outcome_id = check_transition.to_outcome
if from_state_id == UNIQUE_DECIDER_STATE_ID:
if to_state_id != self.state_id:
return (False, 'Transition from the decider state must go to the parent state') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif to_state_id != UNIQUE_DECIDER_STATE_ID:
if from_outcome_id not in [-2, -1] or to_outcome_id not in [-2, -1]:
return (False, 'Transition from this state must go to the decider state. The only exception are transition from aborted/preempted to the parent aborted/preempted outcomes') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return (True, message) |
def status(
message: str = None,
progress: float = None,
section_message: str = None,
section_progress: float = None,
):
"""
Updates the status display, which is only visible while a step is running.
This is useful for providing feedback and information during long-running
steps.
A section progress is also available for cases where long running tasks
consist of multiple tasks and you want to display sub-progress messages
within the context of the larger status.
Note: this is only supported when running in the Cauldron desktop
application.
:param message:
The status message you want to display. If left blank the previously
set status message will be retained. Should you desire to remove an
existing message, specify a blank string for this argument.
:param progress:
A number between zero and one that indicates the overall progress for
the current status. If no value is specified, the previously assigned
progress will be retained.
:param section_message:
The status message you want to display for a particular task within a
long-running step. If left blank the previously set section message
will be retained. Should you desire to remove an existing message,
specify a blank string for this argument.
:param section_progress:
A number between zero and one that indicates the progress for the
current section status. If no value is specified, the previously
assigned section progress value will be retained.
"""
environ.abort_thread()
step = _cd.project.get_internal_project().current_step
if message is not None:
step.progress_message = message
if progress is not None:
step.progress = max(0.0, min(1.0, progress))
if section_message is not None:
step.sub_progress_message = section_message
if section_progress is not None:
step.sub_progress = section_progress | def function[status, parameter[message, progress, section_message, section_progress]]:
constant[
Updates the status display, which is only visible while a step is running.
This is useful for providing feedback and information during long-running
steps.
A section progress is also available for cases where long running tasks
consist of multiple tasks and you want to display sub-progress messages
within the context of the larger status.
Note: this is only supported when running in the Cauldron desktop
application.
:param message:
The status message you want to display. If left blank the previously
set status message will be retained. Should you desire to remove an
existing message, specify a blank string for this argument.
:param progress:
A number between zero and one that indicates the overall progress for
the current status. If no value is specified, the previously assigned
progress will be retained.
:param section_message:
The status message you want to display for a particular task within a
long-running step. If left blank the previously set section message
will be retained. Should you desire to remove an existing message,
specify a blank string for this argument.
:param section_progress:
A number between zero and one that indicates the progress for the
current section status. If no value is specified, the previously
assigned section progress value will be retained.
]
call[name[environ].abort_thread, parameter[]]
variable[step] assign[=] call[name[_cd].project.get_internal_project, parameter[]].current_step
if compare[name[message] is_not constant[None]] begin[:]
name[step].progress_message assign[=] name[message]
if compare[name[progress] is_not constant[None]] begin[:]
name[step].progress assign[=] call[name[max], parameter[constant[0.0], call[name[min], parameter[constant[1.0], name[progress]]]]]
if compare[name[section_message] is_not constant[None]] begin[:]
name[step].sub_progress_message assign[=] name[section_message]
if compare[name[section_progress] is_not constant[None]] begin[:]
name[step].sub_progress assign[=] name[section_progress] | keyword[def] identifier[status] (
identifier[message] : identifier[str] = keyword[None] ,
identifier[progress] : identifier[float] = keyword[None] ,
identifier[section_message] : identifier[str] = keyword[None] ,
identifier[section_progress] : identifier[float] = keyword[None] ,
):
literal[string]
identifier[environ] . identifier[abort_thread] ()
identifier[step] = identifier[_cd] . identifier[project] . identifier[get_internal_project] (). identifier[current_step]
keyword[if] identifier[message] keyword[is] keyword[not] keyword[None] :
identifier[step] . identifier[progress_message] = identifier[message]
keyword[if] identifier[progress] keyword[is] keyword[not] keyword[None] :
identifier[step] . identifier[progress] = identifier[max] ( literal[int] , identifier[min] ( literal[int] , identifier[progress] ))
keyword[if] identifier[section_message] keyword[is] keyword[not] keyword[None] :
identifier[step] . identifier[sub_progress_message] = identifier[section_message]
keyword[if] identifier[section_progress] keyword[is] keyword[not] keyword[None] :
identifier[step] . identifier[sub_progress] = identifier[section_progress] | def status(message: str=None, progress: float=None, section_message: str=None, section_progress: float=None):
"""
Updates the status display, which is only visible while a step is running.
This is useful for providing feedback and information during long-running
steps.
A section progress is also available for cases where long running tasks
consist of multiple tasks and you want to display sub-progress messages
within the context of the larger status.
Note: this is only supported when running in the Cauldron desktop
application.
:param message:
The status message you want to display. If left blank the previously
set status message will be retained. Should you desire to remove an
existing message, specify a blank string for this argument.
:param progress:
A number between zero and one that indicates the overall progress for
the current status. If no value is specified, the previously assigned
progress will be retained.
:param section_message:
The status message you want to display for a particular task within a
long-running step. If left blank the previously set section message
will be retained. Should you desire to remove an existing message,
specify a blank string for this argument.
:param section_progress:
A number between zero and one that indicates the progress for the
current section status. If no value is specified, the previously
assigned section progress value will be retained.
"""
environ.abort_thread()
step = _cd.project.get_internal_project().current_step
if message is not None:
step.progress_message = message # depends on [control=['if'], data=['message']]
if progress is not None:
step.progress = max(0.0, min(1.0, progress)) # depends on [control=['if'], data=['progress']]
if section_message is not None:
step.sub_progress_message = section_message # depends on [control=['if'], data=['section_message']]
if section_progress is not None:
step.sub_progress = section_progress # depends on [control=['if'], data=['section_progress']] |
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp) | def function[close, parameter[self]]:
constant[Close the file, and for mode 'w', 'x' and 'a' write the ending
records.]
if compare[name[self].fp is constant[None]] begin[:]
return[None]
<ast.Try object at 0x7da1b11123e0> | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[fp] keyword[is] keyword[None] :
keyword[return]
keyword[try] :
keyword[if] identifier[self] . identifier[mode] keyword[in] ( literal[string] , literal[string] , literal[string] ) keyword[and] identifier[self] . identifier[_didModify] :
keyword[with] identifier[self] . identifier[_lock] :
keyword[if] identifier[self] . identifier[_seekable] :
identifier[self] . identifier[fp] . identifier[seek] ( identifier[self] . identifier[start_dir] )
identifier[self] . identifier[_write_end_record] ()
keyword[finally] :
identifier[fp] = identifier[self] . identifier[fp]
identifier[self] . identifier[fp] = keyword[None]
identifier[self] . identifier[_fpclose] ( identifier[fp] ) | def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return # depends on [control=['if'], data=[]]
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir) # depends on [control=['if'], data=[]]
self._write_end_record() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
fp = self.fp
self.fp = None
self._fpclose(fp) |
def write_raster_window(
in_tile=None, in_data=None, out_profile=None, out_tile=None, out_path=None,
tags=None, bucket_resource=None
):
"""
Write a window from a numpy array to an output file.
Parameters
----------
in_tile : ``BufferedTile``
``BufferedTile`` with a data attribute holding NumPy data
in_data : array
out_profile : dictionary
metadata dictionary for rasterio
out_tile : ``Tile``
provides output boundaries; if None, in_tile is used
out_path : string
output path to write to
tags : optional tags to be added to GeoTIFF file
bucket_resource : boto3 bucket resource to write to in case of S3 output
"""
if not isinstance(out_path, str):
raise TypeError("out_path must be a string")
logger.debug("write %s", out_path)
if out_path == "memoryfile":
raise DeprecationWarning(
"Writing to memoryfile with write_raster_window() is deprecated. "
"Please use RasterWindowMemoryFile."
)
out_tile = in_tile if out_tile is None else out_tile
_validate_write_window_params(in_tile, out_tile, in_data, out_profile)
# extract data
window_data = extract_from_array(
in_raster=in_data,
in_affine=in_tile.affine,
out_tile=out_tile
) if in_tile != out_tile else in_data
# use transform instead of affine
if "affine" in out_profile:
out_profile["transform"] = out_profile.pop("affine")
# write if there is any band with non-masked data
if window_data.all() is not ma.masked:
try:
if out_path.startswith("s3://"):
with RasterWindowMemoryFile(
in_tile=out_tile,
in_data=window_data,
out_profile=out_profile,
out_tile=out_tile,
tags=tags
) as memfile:
logger.debug((out_tile.id, "upload tile", out_path))
bucket_resource.put_object(
Key="/".join(out_path.split("/")[3:]),
Body=memfile
)
else:
with rasterio.open(out_path, 'w', **out_profile) as dst:
logger.debug((out_tile.id, "write tile", out_path))
dst.write(window_data.astype(out_profile["dtype"], copy=False))
_write_tags(dst, tags)
except Exception as e:
logger.exception("error while writing file %s: %s", out_path, e)
raise
else:
logger.debug((out_tile.id, "array window empty", out_path)) | def function[write_raster_window, parameter[in_tile, in_data, out_profile, out_tile, out_path, tags, bucket_resource]]:
constant[
Write a window from a numpy array to an output file.
Parameters
----------
in_tile : ``BufferedTile``
``BufferedTile`` with a data attribute holding NumPy data
in_data : array
out_profile : dictionary
metadata dictionary for rasterio
out_tile : ``Tile``
provides output boundaries; if None, in_tile is used
out_path : string
output path to write to
tags : optional tags to be added to GeoTIFF file
bucket_resource : boto3 bucket resource to write to in case of S3 output
]
if <ast.UnaryOp object at 0x7da1b00b0910> begin[:]
<ast.Raise object at 0x7da1b00b0a00>
call[name[logger].debug, parameter[constant[write %s], name[out_path]]]
if compare[name[out_path] equal[==] constant[memoryfile]] begin[:]
<ast.Raise object at 0x7da1b00b07c0>
variable[out_tile] assign[=] <ast.IfExp object at 0x7da1b00b0bb0>
call[name[_validate_write_window_params], parameter[name[in_tile], name[out_tile], name[in_data], name[out_profile]]]
variable[window_data] assign[=] <ast.IfExp object at 0x7da1b00b0dc0>
if compare[constant[affine] in name[out_profile]] begin[:]
call[name[out_profile]][constant[transform]] assign[=] call[name[out_profile].pop, parameter[constant[affine]]]
if compare[call[name[window_data].all, parameter[]] is_not name[ma].masked] begin[:]
<ast.Try object at 0x7da1b008b3d0> | keyword[def] identifier[write_raster_window] (
identifier[in_tile] = keyword[None] , identifier[in_data] = keyword[None] , identifier[out_profile] = keyword[None] , identifier[out_tile] = keyword[None] , identifier[out_path] = keyword[None] ,
identifier[tags] = keyword[None] , identifier[bucket_resource] = keyword[None]
):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[out_path] , identifier[str] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[out_path] )
keyword[if] identifier[out_path] == literal[string] :
keyword[raise] identifier[DeprecationWarning] (
literal[string]
literal[string]
)
identifier[out_tile] = identifier[in_tile] keyword[if] identifier[out_tile] keyword[is] keyword[None] keyword[else] identifier[out_tile]
identifier[_validate_write_window_params] ( identifier[in_tile] , identifier[out_tile] , identifier[in_data] , identifier[out_profile] )
identifier[window_data] = identifier[extract_from_array] (
identifier[in_raster] = identifier[in_data] ,
identifier[in_affine] = identifier[in_tile] . identifier[affine] ,
identifier[out_tile] = identifier[out_tile]
) keyword[if] identifier[in_tile] != identifier[out_tile] keyword[else] identifier[in_data]
keyword[if] literal[string] keyword[in] identifier[out_profile] :
identifier[out_profile] [ literal[string] ]= identifier[out_profile] . identifier[pop] ( literal[string] )
keyword[if] identifier[window_data] . identifier[all] () keyword[is] keyword[not] identifier[ma] . identifier[masked] :
keyword[try] :
keyword[if] identifier[out_path] . identifier[startswith] ( literal[string] ):
keyword[with] identifier[RasterWindowMemoryFile] (
identifier[in_tile] = identifier[out_tile] ,
identifier[in_data] = identifier[window_data] ,
identifier[out_profile] = identifier[out_profile] ,
identifier[out_tile] = identifier[out_tile] ,
identifier[tags] = identifier[tags]
) keyword[as] identifier[memfile] :
identifier[logger] . identifier[debug] (( identifier[out_tile] . identifier[id] , literal[string] , identifier[out_path] ))
identifier[bucket_resource] . identifier[put_object] (
identifier[Key] = literal[string] . identifier[join] ( identifier[out_path] . identifier[split] ( literal[string] )[ literal[int] :]),
identifier[Body] = identifier[memfile]
)
keyword[else] :
keyword[with] identifier[rasterio] . identifier[open] ( identifier[out_path] , literal[string] ,** identifier[out_profile] ) keyword[as] identifier[dst] :
identifier[logger] . identifier[debug] (( identifier[out_tile] . identifier[id] , literal[string] , identifier[out_path] ))
identifier[dst] . identifier[write] ( identifier[window_data] . identifier[astype] ( identifier[out_profile] [ literal[string] ], identifier[copy] = keyword[False] ))
identifier[_write_tags] ( identifier[dst] , identifier[tags] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[exception] ( literal[string] , identifier[out_path] , identifier[e] )
keyword[raise]
keyword[else] :
identifier[logger] . identifier[debug] (( identifier[out_tile] . identifier[id] , literal[string] , identifier[out_path] )) | def write_raster_window(in_tile=None, in_data=None, out_profile=None, out_tile=None, out_path=None, tags=None, bucket_resource=None):
"""
Write a window from a numpy array to an output file.
Parameters
----------
in_tile : ``BufferedTile``
``BufferedTile`` with a data attribute holding NumPy data
in_data : array
out_profile : dictionary
metadata dictionary for rasterio
out_tile : ``Tile``
provides output boundaries; if None, in_tile is used
out_path : string
output path to write to
tags : optional tags to be added to GeoTIFF file
bucket_resource : boto3 bucket resource to write to in case of S3 output
"""
if not isinstance(out_path, str):
raise TypeError('out_path must be a string') # depends on [control=['if'], data=[]]
logger.debug('write %s', out_path)
if out_path == 'memoryfile':
raise DeprecationWarning('Writing to memoryfile with write_raster_window() is deprecated. Please use RasterWindowMemoryFile.') # depends on [control=['if'], data=[]]
out_tile = in_tile if out_tile is None else out_tile
_validate_write_window_params(in_tile, out_tile, in_data, out_profile)
# extract data
window_data = extract_from_array(in_raster=in_data, in_affine=in_tile.affine, out_tile=out_tile) if in_tile != out_tile else in_data
# use transform instead of affine
if 'affine' in out_profile:
out_profile['transform'] = out_profile.pop('affine') # depends on [control=['if'], data=['out_profile']]
# write if there is any band with non-masked data
if window_data.all() is not ma.masked:
try:
if out_path.startswith('s3://'):
with RasterWindowMemoryFile(in_tile=out_tile, in_data=window_data, out_profile=out_profile, out_tile=out_tile, tags=tags) as memfile:
logger.debug((out_tile.id, 'upload tile', out_path))
bucket_resource.put_object(Key='/'.join(out_path.split('/')[3:]), Body=memfile) # depends on [control=['with'], data=['memfile']] # depends on [control=['if'], data=[]]
else:
with rasterio.open(out_path, 'w', **out_profile) as dst:
logger.debug((out_tile.id, 'write tile', out_path))
dst.write(window_data.astype(out_profile['dtype'], copy=False))
_write_tags(dst, tags) # depends on [control=['with'], data=['dst']] # depends on [control=['try'], data=[]]
except Exception as e:
logger.exception('error while writing file %s: %s', out_path, e)
raise # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
logger.debug((out_tile.id, 'array window empty', out_path)) |
def get_translation(self, language_code, related_name=None):
"""
Fetch the translated model
"""
meta = self._parler_meta._get_extension_by_related_name(related_name)
return self._get_translated_model(language_code, meta=meta) | def function[get_translation, parameter[self, language_code, related_name]]:
constant[
Fetch the translated model
]
variable[meta] assign[=] call[name[self]._parler_meta._get_extension_by_related_name, parameter[name[related_name]]]
return[call[name[self]._get_translated_model, parameter[name[language_code]]]] | keyword[def] identifier[get_translation] ( identifier[self] , identifier[language_code] , identifier[related_name] = keyword[None] ):
literal[string]
identifier[meta] = identifier[self] . identifier[_parler_meta] . identifier[_get_extension_by_related_name] ( identifier[related_name] )
keyword[return] identifier[self] . identifier[_get_translated_model] ( identifier[language_code] , identifier[meta] = identifier[meta] ) | def get_translation(self, language_code, related_name=None):
"""
Fetch the translated model
"""
meta = self._parler_meta._get_extension_by_related_name(related_name)
return self._get_translated_model(language_code, meta=meta) |
def setUrl(self, url):
"""
Attempt to safely set the URL by string.
"""
if isUrl(url):
self._url = url
else:
raise exceptions.BadUrlException(url)
return self | def function[setUrl, parameter[self, url]]:
constant[
Attempt to safely set the URL by string.
]
if call[name[isUrl], parameter[name[url]]] begin[:]
name[self]._url assign[=] name[url]
return[name[self]] | keyword[def] identifier[setUrl] ( identifier[self] , identifier[url] ):
literal[string]
keyword[if] identifier[isUrl] ( identifier[url] ):
identifier[self] . identifier[_url] = identifier[url]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[BadUrlException] ( identifier[url] )
keyword[return] identifier[self] | def setUrl(self, url):
"""
Attempt to safely set the URL by string.
"""
if isUrl(url):
self._url = url # depends on [control=['if'], data=[]]
else:
raise exceptions.BadUrlException(url)
return self |
def explain_prediction_df(estimator, doc, **kwargs):
# type: (...) -> pd.DataFrame
""" Explain prediction and export explanation to ``pandas.DataFrame``
All keyword arguments are passed to :func:`eli5.explain_prediction`.
Weights of all features are exported by default.
"""
kwargs = _set_defaults(kwargs)
return format_as_dataframe(
eli5.explain_prediction(estimator, doc, **kwargs)) | def function[explain_prediction_df, parameter[estimator, doc]]:
constant[ Explain prediction and export explanation to ``pandas.DataFrame``
All keyword arguments are passed to :func:`eli5.explain_prediction`.
Weights of all features are exported by default.
]
variable[kwargs] assign[=] call[name[_set_defaults], parameter[name[kwargs]]]
return[call[name[format_as_dataframe], parameter[call[name[eli5].explain_prediction, parameter[name[estimator], name[doc]]]]]] | keyword[def] identifier[explain_prediction_df] ( identifier[estimator] , identifier[doc] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] = identifier[_set_defaults] ( identifier[kwargs] )
keyword[return] identifier[format_as_dataframe] (
identifier[eli5] . identifier[explain_prediction] ( identifier[estimator] , identifier[doc] ,** identifier[kwargs] )) | def explain_prediction_df(estimator, doc, **kwargs):
# type: (...) -> pd.DataFrame
' Explain prediction and export explanation to ``pandas.DataFrame``\n All keyword arguments are passed to :func:`eli5.explain_prediction`.\n Weights of all features are exported by default.\n '
kwargs = _set_defaults(kwargs)
return format_as_dataframe(eli5.explain_prediction(estimator, doc, **kwargs)) |
def histogram(values, mode=0, bin_function=None):
"""Return a list of (value, count) pairs, summarizing the input values.
Sorted by increasing value, or if mode=1, by decreasing count.
If bin_function is given, map it over values first."""
if bin_function: values = map(bin_function, values)
bins = {}
for val in values:
bins[val] = bins.get(val, 0) + 1
if mode:
return sorted(bins.items(), key=lambda x: (x[1],x[0]), reverse=True)
else:
return sorted(bins.items()) | def function[histogram, parameter[values, mode, bin_function]]:
constant[Return a list of (value, count) pairs, summarizing the input values.
Sorted by increasing value, or if mode=1, by decreasing count.
If bin_function is given, map it over values first.]
if name[bin_function] begin[:]
variable[values] assign[=] call[name[map], parameter[name[bin_function], name[values]]]
variable[bins] assign[=] dictionary[[], []]
for taget[name[val]] in starred[name[values]] begin[:]
call[name[bins]][name[val]] assign[=] binary_operation[call[name[bins].get, parameter[name[val], constant[0]]] + constant[1]]
if name[mode] begin[:]
return[call[name[sorted], parameter[call[name[bins].items, parameter[]]]]] | keyword[def] identifier[histogram] ( identifier[values] , identifier[mode] = literal[int] , identifier[bin_function] = keyword[None] ):
literal[string]
keyword[if] identifier[bin_function] : identifier[values] = identifier[map] ( identifier[bin_function] , identifier[values] )
identifier[bins] ={}
keyword[for] identifier[val] keyword[in] identifier[values] :
identifier[bins] [ identifier[val] ]= identifier[bins] . identifier[get] ( identifier[val] , literal[int] )+ literal[int]
keyword[if] identifier[mode] :
keyword[return] identifier[sorted] ( identifier[bins] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] :( identifier[x] [ literal[int] ], identifier[x] [ literal[int] ]), identifier[reverse] = keyword[True] )
keyword[else] :
keyword[return] identifier[sorted] ( identifier[bins] . identifier[items] ()) | def histogram(values, mode=0, bin_function=None):
"""Return a list of (value, count) pairs, summarizing the input values.
Sorted by increasing value, or if mode=1, by decreasing count.
If bin_function is given, map it over values first."""
if bin_function:
values = map(bin_function, values) # depends on [control=['if'], data=[]]
bins = {}
for val in values:
bins[val] = bins.get(val, 0) + 1 # depends on [control=['for'], data=['val']]
if mode:
return sorted(bins.items(), key=lambda x: (x[1], x[0]), reverse=True) # depends on [control=['if'], data=[]]
else:
return sorted(bins.items()) |
def read(self):
"""
(Re)read the configuration.
Beware that the current work directory may affect this operation:
If there is a 'paperwork.conf' in the current directory, it will be
read instead of '~/.paperwork.conf', see __init__())
"""
logger.info("Reloading %s ..." % self.__configfile)
# smash the previous config
self._configparser = configparser.SafeConfigParser()
self._configparser.read([self.__configfile])
sections = set()
for setting in self.settings.values():
sections.add(setting.section)
for section in sections:
# make sure that all the sections exist
if not self._configparser.has_section(section):
self._configparser.add_section(section)
for setting in self.settings.values():
setting.load(self._configparser) | def function[read, parameter[self]]:
constant[
(Re)read the configuration.
Beware that the current work directory may affect this operation:
If there is a 'paperwork.conf' in the current directory, it will be
read instead of '~/.paperwork.conf', see __init__())
]
call[name[logger].info, parameter[binary_operation[constant[Reloading %s ...] <ast.Mod object at 0x7da2590d6920> name[self].__configfile]]]
name[self]._configparser assign[=] call[name[configparser].SafeConfigParser, parameter[]]
call[name[self]._configparser.read, parameter[list[[<ast.Attribute object at 0x7da18f58f700>]]]]
variable[sections] assign[=] call[name[set], parameter[]]
for taget[name[setting]] in starred[call[name[self].settings.values, parameter[]]] begin[:]
call[name[sections].add, parameter[name[setting].section]]
for taget[name[section]] in starred[name[sections]] begin[:]
if <ast.UnaryOp object at 0x7da18f8102b0> begin[:]
call[name[self]._configparser.add_section, parameter[name[section]]]
for taget[name[setting]] in starred[call[name[self].settings.values, parameter[]]] begin[:]
call[name[setting].load, parameter[name[self]._configparser]] | keyword[def] identifier[read] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] % identifier[self] . identifier[__configfile] )
identifier[self] . identifier[_configparser] = identifier[configparser] . identifier[SafeConfigParser] ()
identifier[self] . identifier[_configparser] . identifier[read] ([ identifier[self] . identifier[__configfile] ])
identifier[sections] = identifier[set] ()
keyword[for] identifier[setting] keyword[in] identifier[self] . identifier[settings] . identifier[values] ():
identifier[sections] . identifier[add] ( identifier[setting] . identifier[section] )
keyword[for] identifier[section] keyword[in] identifier[sections] :
keyword[if] keyword[not] identifier[self] . identifier[_configparser] . identifier[has_section] ( identifier[section] ):
identifier[self] . identifier[_configparser] . identifier[add_section] ( identifier[section] )
keyword[for] identifier[setting] keyword[in] identifier[self] . identifier[settings] . identifier[values] ():
identifier[setting] . identifier[load] ( identifier[self] . identifier[_configparser] ) | def read(self):
"""
(Re)read the configuration.
Beware that the current work directory may affect this operation:
If there is a 'paperwork.conf' in the current directory, it will be
read instead of '~/.paperwork.conf', see __init__())
"""
logger.info('Reloading %s ...' % self.__configfile)
# smash the previous config
self._configparser = configparser.SafeConfigParser()
self._configparser.read([self.__configfile])
sections = set()
for setting in self.settings.values():
sections.add(setting.section) # depends on [control=['for'], data=['setting']]
for section in sections:
# make sure that all the sections exist
if not self._configparser.has_section(section):
self._configparser.add_section(section) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['section']]
for setting in self.settings.values():
setting.load(self._configparser) # depends on [control=['for'], data=['setting']] |
def register_pubkey(self):
"""
XXX Support compressed point format.
XXX Check that the pubkey received is on the curve.
"""
# point_format = 0
# if self.point[0] in [b'\x02', b'\x03']:
# point_format = 1
curve_name = _tls_named_curves[self.named_curve]
curve = ec._CURVE_TYPES[curve_name]()
import_point = ec.EllipticCurvePublicNumbers.from_encoded_point
pubnum = import_point(curve, self.point)
s = self.tls_session
s.server_kx_pubkey = pubnum.public_key(default_backend())
if not s.client_kx_ecdh_params:
s.client_kx_ecdh_params = curve | def function[register_pubkey, parameter[self]]:
constant[
XXX Support compressed point format.
XXX Check that the pubkey received is on the curve.
]
variable[curve_name] assign[=] call[name[_tls_named_curves]][name[self].named_curve]
variable[curve] assign[=] call[call[name[ec]._CURVE_TYPES][name[curve_name]], parameter[]]
variable[import_point] assign[=] name[ec].EllipticCurvePublicNumbers.from_encoded_point
variable[pubnum] assign[=] call[name[import_point], parameter[name[curve], name[self].point]]
variable[s] assign[=] name[self].tls_session
name[s].server_kx_pubkey assign[=] call[name[pubnum].public_key, parameter[call[name[default_backend], parameter[]]]]
if <ast.UnaryOp object at 0x7da1b215cc40> begin[:]
name[s].client_kx_ecdh_params assign[=] name[curve] | keyword[def] identifier[register_pubkey] ( identifier[self] ):
literal[string]
identifier[curve_name] = identifier[_tls_named_curves] [ identifier[self] . identifier[named_curve] ]
identifier[curve] = identifier[ec] . identifier[_CURVE_TYPES] [ identifier[curve_name] ]()
identifier[import_point] = identifier[ec] . identifier[EllipticCurvePublicNumbers] . identifier[from_encoded_point]
identifier[pubnum] = identifier[import_point] ( identifier[curve] , identifier[self] . identifier[point] )
identifier[s] = identifier[self] . identifier[tls_session]
identifier[s] . identifier[server_kx_pubkey] = identifier[pubnum] . identifier[public_key] ( identifier[default_backend] ())
keyword[if] keyword[not] identifier[s] . identifier[client_kx_ecdh_params] :
identifier[s] . identifier[client_kx_ecdh_params] = identifier[curve] | def register_pubkey(self):
"""
XXX Support compressed point format.
XXX Check that the pubkey received is on the curve.
"""
# point_format = 0
# if self.point[0] in [b'\x02', b'\x03']:
# point_format = 1
curve_name = _tls_named_curves[self.named_curve]
curve = ec._CURVE_TYPES[curve_name]()
import_point = ec.EllipticCurvePublicNumbers.from_encoded_point
pubnum = import_point(curve, self.point)
s = self.tls_session
s.server_kx_pubkey = pubnum.public_key(default_backend())
if not s.client_kx_ecdh_params:
s.client_kx_ecdh_params = curve # depends on [control=['if'], data=[]] |
def any_datetime(from_date=datetime(1990, 1, 1), to_date=datetime.now()):
"""
Return random datetime from the [from_date, to_date] interval
>>> result = any_datetime(from_date=datetime(1990,1,1), to_date=datetime(1990,1,3))
>>> type(result)
<type 'datetime.datetime'>
>>> result >= datetime(1990,1,1) and result <= datetime(1990,1,3)
True
"""
days = any_int(min_value=0, max_value=(to_date - from_date).days-1)
time = timedelta(seconds=any_int(min_value=0, max_value=24*3600-1))
return from_date + timedelta(days=days) + time | def function[any_datetime, parameter[from_date, to_date]]:
constant[
Return random datetime from the [from_date, to_date] interval
>>> result = any_datetime(from_date=datetime(1990,1,1), to_date=datetime(1990,1,3))
>>> type(result)
<type 'datetime.datetime'>
>>> result >= datetime(1990,1,1) and result <= datetime(1990,1,3)
True
]
variable[days] assign[=] call[name[any_int], parameter[]]
variable[time] assign[=] call[name[timedelta], parameter[]]
return[binary_operation[binary_operation[name[from_date] + call[name[timedelta], parameter[]]] + name[time]]] | keyword[def] identifier[any_datetime] ( identifier[from_date] = identifier[datetime] ( literal[int] , literal[int] , literal[int] ), identifier[to_date] = identifier[datetime] . identifier[now] ()):
literal[string]
identifier[days] = identifier[any_int] ( identifier[min_value] = literal[int] , identifier[max_value] =( identifier[to_date] - identifier[from_date] ). identifier[days] - literal[int] )
identifier[time] = identifier[timedelta] ( identifier[seconds] = identifier[any_int] ( identifier[min_value] = literal[int] , identifier[max_value] = literal[int] * literal[int] - literal[int] ))
keyword[return] identifier[from_date] + identifier[timedelta] ( identifier[days] = identifier[days] )+ identifier[time] | def any_datetime(from_date=datetime(1990, 1, 1), to_date=datetime.now()):
"""
Return random datetime from the [from_date, to_date] interval
>>> result = any_datetime(from_date=datetime(1990,1,1), to_date=datetime(1990,1,3))
>>> type(result)
<type 'datetime.datetime'>
>>> result >= datetime(1990,1,1) and result <= datetime(1990,1,3)
True
"""
days = any_int(min_value=0, max_value=(to_date - from_date).days - 1)
time = timedelta(seconds=any_int(min_value=0, max_value=24 * 3600 - 1))
return from_date + timedelta(days=days) + time |
def sniff_csv_format(csv_file,
potential_sep=['\t', ',', ';', '|', '-', '_'],
max_test_lines=10,
zip_file=None):
""" Tries to get the separator, nr of index cols and header rows in a csv file
Parameters
----------
csv_file: str
Path to a csv file
potential_sep: list, optional
List of potential separators (delimiters) to test.
Default: '\t', ',', ';', '|', '-', '_'
max_test_lines: int, optional
How many lines to test, default: 10 or available lines in csv_file
zip_file: str, optional
Path to a zip file containing the csv file (if any, default: None).
If a zip file is given, the path given at 'csv_file' is assumed
to be the path to the file within the zip_file.
Returns
-------
dict with
sep: string (separator)
nr_index_col: int
nr_header_row: int
Entries are set to None if inconsistent information in the file
"""
def read_first_lines(filehandle):
lines = []
for i in range(max_test_lines):
line = ff.readline()
if line == '':
break
try:
line = line.decode('utf-8')
except AttributeError:
pass
lines.append(line[:-1])
return lines
if zip_file:
with zipfile.ZipFile(zip_file, 'r') as zz:
with zz.open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
else:
with open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
sep_aly_lines = [sorted([(line.count(sep), sep)
for sep in potential_sep if line.count(sep) > 0],
key=lambda x: x[0], reverse=True) for line in test_lines]
for nr, (count, sep) in enumerate(sep_aly_lines[0]):
for line in sep_aly_lines:
if line[nr][0] == count:
break
else:
sep = None
if sep:
break
nr_header_row = None
nr_index_col = None
if sep:
nr_index_col = find_first_number(test_lines[-1].split(sep))
if nr_index_col:
for nr_header_row, line in enumerate(test_lines):
if find_first_number(line.split(sep)) == nr_index_col:
break
return dict(sep=sep,
nr_header_row=nr_header_row,
nr_index_col=nr_index_col) | def function[sniff_csv_format, parameter[csv_file, potential_sep, max_test_lines, zip_file]]:
constant[ Tries to get the separator, nr of index cols and header rows in a csv file
Parameters
----------
csv_file: str
Path to a csv file
potential_sep: list, optional
List of potential separators (delimiters) to test.
Default: ' ', ',', ';', '|', '-', '_'
max_test_lines: int, optional
How many lines to test, default: 10 or available lines in csv_file
zip_file: str, optional
Path to a zip file containing the csv file (if any, default: None).
If a zip file is given, the path given at 'csv_file' is assumed
to be the path to the file within the zip_file.
Returns
-------
dict with
sep: string (separator)
nr_index_col: int
nr_header_row: int
Entries are set to None if inconsistent information in the file
]
def function[read_first_lines, parameter[filehandle]]:
variable[lines] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[name[max_test_lines]]]] begin[:]
variable[line] assign[=] call[name[ff].readline, parameter[]]
if compare[name[line] equal[==] constant[]] begin[:]
break
<ast.Try object at 0x7da18f58e950>
call[name[lines].append, parameter[call[name[line]][<ast.Slice object at 0x7da18f58e830>]]]
return[name[lines]]
if name[zip_file] begin[:]
with call[name[zipfile].ZipFile, parameter[name[zip_file], constant[r]]] begin[:]
with call[name[zz].open, parameter[name[csv_file], constant[r]]] begin[:]
variable[test_lines] assign[=] call[name[read_first_lines], parameter[name[ff]]]
variable[sep_aly_lines] assign[=] <ast.ListComp object at 0x7da18f58c610>
for taget[tuple[[<ast.Name object at 0x7da1b047a200>, <ast.Tuple object at 0x7da1b0478b50>]]] in starred[call[name[enumerate], parameter[call[name[sep_aly_lines]][constant[0]]]]] begin[:]
for taget[name[line]] in starred[name[sep_aly_lines]] begin[:]
if compare[call[call[name[line]][name[nr]]][constant[0]] equal[==] name[count]] begin[:]
break
if name[sep] begin[:]
break
variable[nr_header_row] assign[=] constant[None]
variable[nr_index_col] assign[=] constant[None]
if name[sep] begin[:]
variable[nr_index_col] assign[=] call[name[find_first_number], parameter[call[call[name[test_lines]][<ast.UnaryOp object at 0x7da1b04796f0>].split, parameter[name[sep]]]]]
if name[nr_index_col] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b047a980>, <ast.Name object at 0x7da1b047a9b0>]]] in starred[call[name[enumerate], parameter[name[test_lines]]]] begin[:]
if compare[call[name[find_first_number], parameter[call[name[line].split, parameter[name[sep]]]]] equal[==] name[nr_index_col]] begin[:]
break
return[call[name[dict], parameter[]]] | keyword[def] identifier[sniff_csv_format] ( identifier[csv_file] ,
identifier[potential_sep] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ],
identifier[max_test_lines] = literal[int] ,
identifier[zip_file] = keyword[None] ):
literal[string]
keyword[def] identifier[read_first_lines] ( identifier[filehandle] ):
identifier[lines] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[max_test_lines] ):
identifier[line] = identifier[ff] . identifier[readline] ()
keyword[if] identifier[line] == literal[string] :
keyword[break]
keyword[try] :
identifier[line] = identifier[line] . identifier[decode] ( literal[string] )
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[lines] . identifier[append] ( identifier[line] [:- literal[int] ])
keyword[return] identifier[lines]
keyword[if] identifier[zip_file] :
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[zip_file] , literal[string] ) keyword[as] identifier[zz] :
keyword[with] identifier[zz] . identifier[open] ( identifier[csv_file] , literal[string] ) keyword[as] identifier[ff] :
identifier[test_lines] = identifier[read_first_lines] ( identifier[ff] )
keyword[else] :
keyword[with] identifier[open] ( identifier[csv_file] , literal[string] ) keyword[as] identifier[ff] :
identifier[test_lines] = identifier[read_first_lines] ( identifier[ff] )
identifier[sep_aly_lines] =[ identifier[sorted] ([( identifier[line] . identifier[count] ( identifier[sep] ), identifier[sep] )
keyword[for] identifier[sep] keyword[in] identifier[potential_sep] keyword[if] identifier[line] . identifier[count] ( identifier[sep] )> literal[int] ],
identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] ) keyword[for] identifier[line] keyword[in] identifier[test_lines] ]
keyword[for] identifier[nr] ,( identifier[count] , identifier[sep] ) keyword[in] identifier[enumerate] ( identifier[sep_aly_lines] [ literal[int] ]):
keyword[for] identifier[line] keyword[in] identifier[sep_aly_lines] :
keyword[if] identifier[line] [ identifier[nr] ][ literal[int] ]== identifier[count] :
keyword[break]
keyword[else] :
identifier[sep] = keyword[None]
keyword[if] identifier[sep] :
keyword[break]
identifier[nr_header_row] = keyword[None]
identifier[nr_index_col] = keyword[None]
keyword[if] identifier[sep] :
identifier[nr_index_col] = identifier[find_first_number] ( identifier[test_lines] [- literal[int] ]. identifier[split] ( identifier[sep] ))
keyword[if] identifier[nr_index_col] :
keyword[for] identifier[nr_header_row] , identifier[line] keyword[in] identifier[enumerate] ( identifier[test_lines] ):
keyword[if] identifier[find_first_number] ( identifier[line] . identifier[split] ( identifier[sep] ))== identifier[nr_index_col] :
keyword[break]
keyword[return] identifier[dict] ( identifier[sep] = identifier[sep] ,
identifier[nr_header_row] = identifier[nr_header_row] ,
identifier[nr_index_col] = identifier[nr_index_col] ) | def sniff_csv_format(csv_file, potential_sep=['\t', ',', ';', '|', '-', '_'], max_test_lines=10, zip_file=None):
""" Tries to get the separator, nr of index cols and header rows in a csv file
Parameters
----------
csv_file: str
Path to a csv file
potential_sep: list, optional
List of potential separators (delimiters) to test.
Default: ' ', ',', ';', '|', '-', '_'
max_test_lines: int, optional
How many lines to test, default: 10 or available lines in csv_file
zip_file: str, optional
Path to a zip file containing the csv file (if any, default: None).
If a zip file is given, the path given at 'csv_file' is assumed
to be the path to the file within the zip_file.
Returns
-------
dict with
sep: string (separator)
nr_index_col: int
nr_header_row: int
Entries are set to None if inconsistent information in the file
"""
def read_first_lines(filehandle):
lines = []
for i in range(max_test_lines):
line = ff.readline()
if line == '':
break # depends on [control=['if'], data=[]]
try:
line = line.decode('utf-8') # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
lines.append(line[:-1]) # depends on [control=['for'], data=[]]
return lines
if zip_file:
with zipfile.ZipFile(zip_file, 'r') as zz:
with zz.open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff) # depends on [control=['with'], data=['ff']] # depends on [control=['with'], data=['zz']] # depends on [control=['if'], data=[]]
else:
with open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff) # depends on [control=['with'], data=['ff']]
sep_aly_lines = [sorted([(line.count(sep), sep) for sep in potential_sep if line.count(sep) > 0], key=lambda x: x[0], reverse=True) for line in test_lines]
for (nr, (count, sep)) in enumerate(sep_aly_lines[0]):
for line in sep_aly_lines:
if line[nr][0] == count:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
else:
sep = None
if sep:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
nr_header_row = None
nr_index_col = None
if sep:
nr_index_col = find_first_number(test_lines[-1].split(sep))
if nr_index_col:
for (nr_header_row, line) in enumerate(test_lines):
if find_first_number(line.split(sep)) == nr_index_col:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return dict(sep=sep, nr_header_row=nr_header_row, nr_index_col=nr_index_col) |
def is_solved(self):
"""
Check if Cube's F2L is solved.
"""
if self.cube.D == [[Square(self.cube["D"].colour)] * 3] * 3:
for face in "LFRB":
if self.cube.get_face(face)[1:] != [[Square(self.cube[face].colour)] * 3] * 2:
return False
return True
return False | def function[is_solved, parameter[self]]:
constant[
Check if Cube's F2L is solved.
]
if compare[name[self].cube.D equal[==] binary_operation[list[[<ast.BinOp object at 0x7da1b07b6f80>]] * constant[3]]] begin[:]
for taget[name[face]] in starred[constant[LFRB]] begin[:]
if compare[call[call[name[self].cube.get_face, parameter[name[face]]]][<ast.Slice object at 0x7da1b07b58a0>] not_equal[!=] binary_operation[list[[<ast.BinOp object at 0x7da1b07b6b60>]] * constant[2]]] begin[:]
return[constant[False]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[is_solved] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[cube] . identifier[D] ==[[ identifier[Square] ( identifier[self] . identifier[cube] [ literal[string] ]. identifier[colour] )]* literal[int] ]* literal[int] :
keyword[for] identifier[face] keyword[in] literal[string] :
keyword[if] identifier[self] . identifier[cube] . identifier[get_face] ( identifier[face] )[ literal[int] :]!=[[ identifier[Square] ( identifier[self] . identifier[cube] [ identifier[face] ]. identifier[colour] )]* literal[int] ]* literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[return] keyword[False] | def is_solved(self):
"""
Check if Cube's F2L is solved.
"""
if self.cube.D == [[Square(self.cube['D'].colour)] * 3] * 3:
for face in 'LFRB':
if self.cube.get_face(face)[1:] != [[Square(self.cube[face].colour)] * 3] * 2:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['face']]
return True # depends on [control=['if'], data=[]]
return False |
def update_datetime(value, range = None):
"""
Updates (drifts) a Date value within specified range defined
:param value: a Date value to drift.
:param range: (optional) a range in milliseconds. Default: 10 days
:return: an updated DateTime value.
"""
range = range if range != None else 10
if range < 0:
return value
days = RandomFloat.next_float(-range, range)
return value + datetime.timedelta(days) | def function[update_datetime, parameter[value, range]]:
constant[
Updates (drifts) a Date value within specified range defined
:param value: a Date value to drift.
:param range: (optional) a range in milliseconds. Default: 10 days
:return: an updated DateTime value.
]
variable[range] assign[=] <ast.IfExp object at 0x7da1b1452950>
if compare[name[range] less[<] constant[0]] begin[:]
return[name[value]]
variable[days] assign[=] call[name[RandomFloat].next_float, parameter[<ast.UnaryOp object at 0x7da1b1648760>, name[range]]]
return[binary_operation[name[value] + call[name[datetime].timedelta, parameter[name[days]]]]] | keyword[def] identifier[update_datetime] ( identifier[value] , identifier[range] = keyword[None] ):
literal[string]
identifier[range] = identifier[range] keyword[if] identifier[range] != keyword[None] keyword[else] literal[int]
keyword[if] identifier[range] < literal[int] :
keyword[return] identifier[value]
identifier[days] = identifier[RandomFloat] . identifier[next_float] (- identifier[range] , identifier[range] )
keyword[return] identifier[value] + identifier[datetime] . identifier[timedelta] ( identifier[days] ) | def update_datetime(value, range=None):
"""
Updates (drifts) a Date value within specified range defined
:param value: a Date value to drift.
:param range: (optional) a range in milliseconds. Default: 10 days
:return: an updated DateTime value.
"""
range = range if range != None else 10
if range < 0:
return value # depends on [control=['if'], data=[]]
days = RandomFloat.next_float(-range, range)
return value + datetime.timedelta(days) |
def hit(self, event):
"""
Notify a breakpoint that it's been hit.
This triggers the corresponding state transition and sets the
C{breakpoint} property of the given L{Event} object.
@see: L{disable}, L{enable}, L{one_shot}, L{running}
@type event: L{Event}
@param event: Debug event to handle (depends on the breakpoint type).
@raise AssertionError: Disabled breakpoints can't be hit.
"""
aProcess = event.get_process()
aThread = event.get_thread()
state = self.get_state()
event.breakpoint = self
if state == self.ENABLED:
self.running(aProcess, aThread)
elif state == self.RUNNING:
self.enable(aProcess, aThread)
elif state == self.ONESHOT:
self.disable(aProcess, aThread)
elif state == self.DISABLED:
# this should not happen
msg = "Hit a disabled breakpoint at address %s"
msg = msg % HexDump.address( self.get_address() )
warnings.warn(msg, BreakpointWarning) | def function[hit, parameter[self, event]]:
constant[
Notify a breakpoint that it's been hit.
This triggers the corresponding state transition and sets the
C{breakpoint} property of the given L{Event} object.
@see: L{disable}, L{enable}, L{one_shot}, L{running}
@type event: L{Event}
@param event: Debug event to handle (depends on the breakpoint type).
@raise AssertionError: Disabled breakpoints can't be hit.
]
variable[aProcess] assign[=] call[name[event].get_process, parameter[]]
variable[aThread] assign[=] call[name[event].get_thread, parameter[]]
variable[state] assign[=] call[name[self].get_state, parameter[]]
name[event].breakpoint assign[=] name[self]
if compare[name[state] equal[==] name[self].ENABLED] begin[:]
call[name[self].running, parameter[name[aProcess], name[aThread]]] | keyword[def] identifier[hit] ( identifier[self] , identifier[event] ):
literal[string]
identifier[aProcess] = identifier[event] . identifier[get_process] ()
identifier[aThread] = identifier[event] . identifier[get_thread] ()
identifier[state] = identifier[self] . identifier[get_state] ()
identifier[event] . identifier[breakpoint] = identifier[self]
keyword[if] identifier[state] == identifier[self] . identifier[ENABLED] :
identifier[self] . identifier[running] ( identifier[aProcess] , identifier[aThread] )
keyword[elif] identifier[state] == identifier[self] . identifier[RUNNING] :
identifier[self] . identifier[enable] ( identifier[aProcess] , identifier[aThread] )
keyword[elif] identifier[state] == identifier[self] . identifier[ONESHOT] :
identifier[self] . identifier[disable] ( identifier[aProcess] , identifier[aThread] )
keyword[elif] identifier[state] == identifier[self] . identifier[DISABLED] :
identifier[msg] = literal[string]
identifier[msg] = identifier[msg] % identifier[HexDump] . identifier[address] ( identifier[self] . identifier[get_address] ())
identifier[warnings] . identifier[warn] ( identifier[msg] , identifier[BreakpointWarning] ) | def hit(self, event):
"""
Notify a breakpoint that it's been hit.
This triggers the corresponding state transition and sets the
C{breakpoint} property of the given L{Event} object.
@see: L{disable}, L{enable}, L{one_shot}, L{running}
@type event: L{Event}
@param event: Debug event to handle (depends on the breakpoint type).
@raise AssertionError: Disabled breakpoints can't be hit.
"""
aProcess = event.get_process()
aThread = event.get_thread()
state = self.get_state()
event.breakpoint = self
if state == self.ENABLED:
self.running(aProcess, aThread) # depends on [control=['if'], data=[]]
elif state == self.RUNNING:
self.enable(aProcess, aThread) # depends on [control=['if'], data=[]]
elif state == self.ONESHOT:
self.disable(aProcess, aThread) # depends on [control=['if'], data=[]]
elif state == self.DISABLED:
# this should not happen
msg = 'Hit a disabled breakpoint at address %s'
msg = msg % HexDump.address(self.get_address())
warnings.warn(msg, BreakpointWarning) # depends on [control=['if'], data=[]] |
def compute_diff(dir_base, dir_cmp):
""" Compare `dir_base' and `dir_cmp' and returns a list with
the following keys:
- deleted files `deleted'
- created files `created'
- updated files `updated'
- deleted directories `deleted_dirs'
"""
data = {}
data['deleted'] = list(set(dir_cmp['files']) - set(dir_base['files']))
data['created'] = list(set(dir_base['files']) - set(dir_cmp['files']))
data['updated'] = []
data['deleted_dirs'] = list(set(dir_cmp['subdirs']) - set(dir_base['subdirs']))
for f in set(dir_cmp['files']).intersection(set(dir_base['files'])):
if dir_base['index'][f] != dir_cmp['index'][f]:
data['updated'].append(f)
return data | def function[compute_diff, parameter[dir_base, dir_cmp]]:
constant[ Compare `dir_base' and `dir_cmp' and returns a list with
the following keys:
- deleted files `deleted'
- created files `created'
- updated files `updated'
- deleted directories `deleted_dirs'
]
variable[data] assign[=] dictionary[[], []]
call[name[data]][constant[deleted]] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[call[name[dir_cmp]][constant[files]]]] - call[name[set], parameter[call[name[dir_base]][constant[files]]]]]]]
call[name[data]][constant[created]] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[call[name[dir_base]][constant[files]]]] - call[name[set], parameter[call[name[dir_cmp]][constant[files]]]]]]]
call[name[data]][constant[updated]] assign[=] list[[]]
call[name[data]][constant[deleted_dirs]] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[call[name[dir_cmp]][constant[subdirs]]]] - call[name[set], parameter[call[name[dir_base]][constant[subdirs]]]]]]]
for taget[name[f]] in starred[call[call[name[set], parameter[call[name[dir_cmp]][constant[files]]]].intersection, parameter[call[name[set], parameter[call[name[dir_base]][constant[files]]]]]]] begin[:]
if compare[call[call[name[dir_base]][constant[index]]][name[f]] not_equal[!=] call[call[name[dir_cmp]][constant[index]]][name[f]]] begin[:]
call[call[name[data]][constant[updated]].append, parameter[name[f]]]
return[name[data]] | keyword[def] identifier[compute_diff] ( identifier[dir_base] , identifier[dir_cmp] ):
literal[string]
identifier[data] ={}
identifier[data] [ literal[string] ]= identifier[list] ( identifier[set] ( identifier[dir_cmp] [ literal[string] ])- identifier[set] ( identifier[dir_base] [ literal[string] ]))
identifier[data] [ literal[string] ]= identifier[list] ( identifier[set] ( identifier[dir_base] [ literal[string] ])- identifier[set] ( identifier[dir_cmp] [ literal[string] ]))
identifier[data] [ literal[string] ]=[]
identifier[data] [ literal[string] ]= identifier[list] ( identifier[set] ( identifier[dir_cmp] [ literal[string] ])- identifier[set] ( identifier[dir_base] [ literal[string] ]))
keyword[for] identifier[f] keyword[in] identifier[set] ( identifier[dir_cmp] [ literal[string] ]). identifier[intersection] ( identifier[set] ( identifier[dir_base] [ literal[string] ])):
keyword[if] identifier[dir_base] [ literal[string] ][ identifier[f] ]!= identifier[dir_cmp] [ literal[string] ][ identifier[f] ]:
identifier[data] [ literal[string] ]. identifier[append] ( identifier[f] )
keyword[return] identifier[data] | def compute_diff(dir_base, dir_cmp):
""" Compare `dir_base' and `dir_cmp' and returns a list with
the following keys:
- deleted files `deleted'
- created files `created'
- updated files `updated'
- deleted directories `deleted_dirs'
"""
data = {}
data['deleted'] = list(set(dir_cmp['files']) - set(dir_base['files']))
data['created'] = list(set(dir_base['files']) - set(dir_cmp['files']))
data['updated'] = []
data['deleted_dirs'] = list(set(dir_cmp['subdirs']) - set(dir_base['subdirs']))
for f in set(dir_cmp['files']).intersection(set(dir_base['files'])):
if dir_base['index'][f] != dir_cmp['index'][f]:
data['updated'].append(f) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
return data |
def replace(self, year=None, month=None, day=None, hour=None, minute=None,
second=None, microsecond=None, tzinfo=None):
"""
Returns a new datetime.datetime or asn1crypto.util.extended_datetime
object with the specified components replaced
:return:
A datetime.datetime or asn1crypto.util.extended_datetime object
"""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is None:
tzinfo = self.tzinfo
if year > 0:
cls = datetime
else:
cls = extended_datetime
return cls(
year,
month,
day,
hour,
minute,
second,
microsecond,
tzinfo
) | def function[replace, parameter[self, year, month, day, hour, minute, second, microsecond, tzinfo]]:
constant[
Returns a new datetime.datetime or asn1crypto.util.extended_datetime
object with the specified components replaced
:return:
A datetime.datetime or asn1crypto.util.extended_datetime object
]
if compare[name[year] is constant[None]] begin[:]
variable[year] assign[=] name[self].year
if compare[name[month] is constant[None]] begin[:]
variable[month] assign[=] name[self].month
if compare[name[day] is constant[None]] begin[:]
variable[day] assign[=] name[self].day
if compare[name[hour] is constant[None]] begin[:]
variable[hour] assign[=] name[self].hour
if compare[name[minute] is constant[None]] begin[:]
variable[minute] assign[=] name[self].minute
if compare[name[second] is constant[None]] begin[:]
variable[second] assign[=] name[self].second
if compare[name[microsecond] is constant[None]] begin[:]
variable[microsecond] assign[=] name[self].microsecond
if compare[name[tzinfo] is constant[None]] begin[:]
variable[tzinfo] assign[=] name[self].tzinfo
if compare[name[year] greater[>] constant[0]] begin[:]
variable[cls] assign[=] name[datetime]
return[call[name[cls], parameter[name[year], name[month], name[day], name[hour], name[minute], name[second], name[microsecond], name[tzinfo]]]] | keyword[def] identifier[replace] ( identifier[self] , identifier[year] = keyword[None] , identifier[month] = keyword[None] , identifier[day] = keyword[None] , identifier[hour] = keyword[None] , identifier[minute] = keyword[None] ,
identifier[second] = keyword[None] , identifier[microsecond] = keyword[None] , identifier[tzinfo] = keyword[None] ):
literal[string]
keyword[if] identifier[year] keyword[is] keyword[None] :
identifier[year] = identifier[self] . identifier[year]
keyword[if] identifier[month] keyword[is] keyword[None] :
identifier[month] = identifier[self] . identifier[month]
keyword[if] identifier[day] keyword[is] keyword[None] :
identifier[day] = identifier[self] . identifier[day]
keyword[if] identifier[hour] keyword[is] keyword[None] :
identifier[hour] = identifier[self] . identifier[hour]
keyword[if] identifier[minute] keyword[is] keyword[None] :
identifier[minute] = identifier[self] . identifier[minute]
keyword[if] identifier[second] keyword[is] keyword[None] :
identifier[second] = identifier[self] . identifier[second]
keyword[if] identifier[microsecond] keyword[is] keyword[None] :
identifier[microsecond] = identifier[self] . identifier[microsecond]
keyword[if] identifier[tzinfo] keyword[is] keyword[None] :
identifier[tzinfo] = identifier[self] . identifier[tzinfo]
keyword[if] identifier[year] > literal[int] :
identifier[cls] = identifier[datetime]
keyword[else] :
identifier[cls] = identifier[extended_datetime]
keyword[return] identifier[cls] (
identifier[year] ,
identifier[month] ,
identifier[day] ,
identifier[hour] ,
identifier[minute] ,
identifier[second] ,
identifier[microsecond] ,
identifier[tzinfo]
) | def replace(self, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=None):
"""
Returns a new datetime.datetime or asn1crypto.util.extended_datetime
object with the specified components replaced
:return:
A datetime.datetime or asn1crypto.util.extended_datetime object
"""
if year is None:
year = self.year # depends on [control=['if'], data=['year']]
if month is None:
month = self.month # depends on [control=['if'], data=['month']]
if day is None:
day = self.day # depends on [control=['if'], data=['day']]
if hour is None:
hour = self.hour # depends on [control=['if'], data=['hour']]
if minute is None:
minute = self.minute # depends on [control=['if'], data=['minute']]
if second is None:
second = self.second # depends on [control=['if'], data=['second']]
if microsecond is None:
microsecond = self.microsecond # depends on [control=['if'], data=['microsecond']]
if tzinfo is None:
tzinfo = self.tzinfo # depends on [control=['if'], data=['tzinfo']]
if year > 0:
cls = datetime # depends on [control=['if'], data=[]]
else:
cls = extended_datetime
return cls(year, month, day, hour, minute, second, microsecond, tzinfo) |
def fcoe_get_login_output_fcoe_login_list_fcoe_login_direct_attached(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
output = ET.SubElement(fcoe_get_login, "output")
fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac")
fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
fcoe_login_direct_attached = ET.SubElement(fcoe_login_list, "fcoe-login-direct-attached")
fcoe_login_direct_attached.text = kwargs.pop('fcoe_login_direct_attached')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[fcoe_get_login_output_fcoe_login_list_fcoe_login_direct_attached, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[fcoe_get_login] assign[=] call[name[ET].Element, parameter[constant[fcoe_get_login]]]
variable[config] assign[=] name[fcoe_get_login]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[fcoe_get_login], constant[output]]]
variable[fcoe_login_list] assign[=] call[name[ET].SubElement, parameter[name[output], constant[fcoe-login-list]]]
variable[fcoe_login_session_mac_key] assign[=] call[name[ET].SubElement, parameter[name[fcoe_login_list], constant[fcoe-login-session-mac]]]
name[fcoe_login_session_mac_key].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_login_session_mac]]]
variable[fcoe_login_direct_attached] assign[=] call[name[ET].SubElement, parameter[name[fcoe_login_list], constant[fcoe-login-direct-attached]]]
name[fcoe_login_direct_attached].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_login_direct_attached]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[fcoe_get_login_output_fcoe_login_list_fcoe_login_direct_attached] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[fcoe_get_login] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[fcoe_get_login]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_get_login] , literal[string] )
identifier[fcoe_login_list] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[fcoe_login_session_mac_key] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_login_list] , literal[string] )
identifier[fcoe_login_session_mac_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[fcoe_login_direct_attached] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_login_list] , literal[string] )
identifier[fcoe_login_direct_attached] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def fcoe_get_login_output_fcoe_login_list_fcoe_login_direct_attached(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
fcoe_get_login = ET.Element('fcoe_get_login')
config = fcoe_get_login
output = ET.SubElement(fcoe_get_login, 'output')
fcoe_login_list = ET.SubElement(output, 'fcoe-login-list')
fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, 'fcoe-login-session-mac')
fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
fcoe_login_direct_attached = ET.SubElement(fcoe_login_list, 'fcoe-login-direct-attached')
fcoe_login_direct_attached.text = kwargs.pop('fcoe_login_direct_attached')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def from_dict(self, mapdict):
""" Import the attribute map from a dictionary
:param mapdict: The dictionary
"""
self.name_format = mapdict["identifier"]
try:
self._fro = dict(
[(k.lower(), v) for k, v in mapdict["fro"].items()])
except KeyError:
pass
try:
self._to = dict([(k.lower(), v) for k, v in mapdict["to"].items()])
except KeyError:
pass
if self._fro is None and self._to is None:
raise ConverterError("Missing specifications")
if self._fro is None or self._to is None:
self.adjust() | def function[from_dict, parameter[self, mapdict]]:
constant[ Import the attribute map from a dictionary
:param mapdict: The dictionary
]
name[self].name_format assign[=] call[name[mapdict]][constant[identifier]]
<ast.Try object at 0x7da1b206a4d0>
<ast.Try object at 0x7da20e9608b0>
if <ast.BoolOp object at 0x7da20e962da0> begin[:]
<ast.Raise object at 0x7da20e961e40>
if <ast.BoolOp object at 0x7da20e962ce0> begin[:]
call[name[self].adjust, parameter[]] | keyword[def] identifier[from_dict] ( identifier[self] , identifier[mapdict] ):
literal[string]
identifier[self] . identifier[name_format] = identifier[mapdict] [ literal[string] ]
keyword[try] :
identifier[self] . identifier[_fro] = identifier[dict] (
[( identifier[k] . identifier[lower] (), identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[mapdict] [ literal[string] ]. identifier[items] ()])
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[try] :
identifier[self] . identifier[_to] = identifier[dict] ([( identifier[k] . identifier[lower] (), identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[mapdict] [ literal[string] ]. identifier[items] ()])
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[if] identifier[self] . identifier[_fro] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[_to] keyword[is] keyword[None] :
keyword[raise] identifier[ConverterError] ( literal[string] )
keyword[if] identifier[self] . identifier[_fro] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[_to] keyword[is] keyword[None] :
identifier[self] . identifier[adjust] () | def from_dict(self, mapdict):
""" Import the attribute map from a dictionary
:param mapdict: The dictionary
"""
self.name_format = mapdict['identifier']
try:
self._fro = dict([(k.lower(), v) for (k, v) in mapdict['fro'].items()]) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
try:
self._to = dict([(k.lower(), v) for (k, v) in mapdict['to'].items()]) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
if self._fro is None and self._to is None:
raise ConverterError('Missing specifications') # depends on [control=['if'], data=[]]
if self._fro is None or self._to is None:
self.adjust() # depends on [control=['if'], data=[]] |
def sign(self, blob, identity):
"""Sign a given blob using the specified identity on the device."""
conn = self.conn_factory()
return conn.sign_ssh_challenge(blob=blob, identity=identity) | def function[sign, parameter[self, blob, identity]]:
constant[Sign a given blob using the specified identity on the device.]
variable[conn] assign[=] call[name[self].conn_factory, parameter[]]
return[call[name[conn].sign_ssh_challenge, parameter[]]] | keyword[def] identifier[sign] ( identifier[self] , identifier[blob] , identifier[identity] ):
literal[string]
identifier[conn] = identifier[self] . identifier[conn_factory] ()
keyword[return] identifier[conn] . identifier[sign_ssh_challenge] ( identifier[blob] = identifier[blob] , identifier[identity] = identifier[identity] ) | def sign(self, blob, identity):
"""Sign a given blob using the specified identity on the device."""
conn = self.conn_factory()
return conn.sign_ssh_challenge(blob=blob, identity=identity) |
def to_sparse(self, format='csr', **kwargs):
"""Convert into a sparse matrix.
Parameters
----------
format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}
Sparse matrix format.
kwargs : keyword arguments
Passed through to sparse matrix constructor.
Returns
-------
m : scipy.sparse.spmatrix
Sparse matrix
Notes
-----
If a mask has been set, it is ignored by this function.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 1], [0, 1]],
... [[1, 1], [0, 0]],
... [[0, 0], [-1, -1]]], dtype='i1')
>>> m = g.to_sparse(format='csr')
>>> m
<4x4 sparse matrix of type '<class 'numpy.int8'>'
with 6 stored elements in Compressed Sparse Row format>
>>> m.data
array([ 1, 1, 1, 1, -1, -1], dtype=int8)
>>> m.indices
array([1, 3, 0, 1, 2, 3], dtype=int32)
>>> m.indptr
array([0, 0, 2, 4, 6], dtype=int32)
"""
h = self.to_haplotypes()
m = h.to_sparse(format=format, **kwargs)
return m | def function[to_sparse, parameter[self, format]]:
constant[Convert into a sparse matrix.
Parameters
----------
format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}
Sparse matrix format.
kwargs : keyword arguments
Passed through to sparse matrix constructor.
Returns
-------
m : scipy.sparse.spmatrix
Sparse matrix
Notes
-----
If a mask has been set, it is ignored by this function.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 1], [0, 1]],
... [[1, 1], [0, 0]],
... [[0, 0], [-1, -1]]], dtype='i1')
>>> m = g.to_sparse(format='csr')
>>> m
<4x4 sparse matrix of type '<class 'numpy.int8'>'
with 6 stored elements in Compressed Sparse Row format>
>>> m.data
array([ 1, 1, 1, 1, -1, -1], dtype=int8)
>>> m.indices
array([1, 3, 0, 1, 2, 3], dtype=int32)
>>> m.indptr
array([0, 0, 2, 4, 6], dtype=int32)
]
variable[h] assign[=] call[name[self].to_haplotypes, parameter[]]
variable[m] assign[=] call[name[h].to_sparse, parameter[]]
return[name[m]] | keyword[def] identifier[to_sparse] ( identifier[self] , identifier[format] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[h] = identifier[self] . identifier[to_haplotypes] ()
identifier[m] = identifier[h] . identifier[to_sparse] ( identifier[format] = identifier[format] ,** identifier[kwargs] )
keyword[return] identifier[m] | def to_sparse(self, format='csr', **kwargs):
"""Convert into a sparse matrix.
Parameters
----------
format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}
Sparse matrix format.
kwargs : keyword arguments
Passed through to sparse matrix constructor.
Returns
-------
m : scipy.sparse.spmatrix
Sparse matrix
Notes
-----
If a mask has been set, it is ignored by this function.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 1], [0, 1]],
... [[1, 1], [0, 0]],
... [[0, 0], [-1, -1]]], dtype='i1')
>>> m = g.to_sparse(format='csr')
>>> m
<4x4 sparse matrix of type '<class 'numpy.int8'>'
with 6 stored elements in Compressed Sparse Row format>
>>> m.data
array([ 1, 1, 1, 1, -1, -1], dtype=int8)
>>> m.indices
array([1, 3, 0, 1, 2, 3], dtype=int32)
>>> m.indptr
array([0, 0, 2, 4, 6], dtype=int32)
"""
h = self.to_haplotypes()
m = h.to_sparse(format=format, **kwargs)
return m |
def last_active(self):
"""
Returns the last non-skipped actor.
:return: the last active actor, None if not available
:rtype: Actor
"""
result = None
for actor in reversed(self.actors):
if not actor.skip:
result = actor
break
return result | def function[last_active, parameter[self]]:
constant[
Returns the last non-skipped actor.
:return: the last active actor, None if not available
:rtype: Actor
]
variable[result] assign[=] constant[None]
for taget[name[actor]] in starred[call[name[reversed], parameter[name[self].actors]]] begin[:]
if <ast.UnaryOp object at 0x7da1b06bc100> begin[:]
variable[result] assign[=] name[actor]
break
return[name[result]] | keyword[def] identifier[last_active] ( identifier[self] ):
literal[string]
identifier[result] = keyword[None]
keyword[for] identifier[actor] keyword[in] identifier[reversed] ( identifier[self] . identifier[actors] ):
keyword[if] keyword[not] identifier[actor] . identifier[skip] :
identifier[result] = identifier[actor]
keyword[break]
keyword[return] identifier[result] | def last_active(self):
"""
Returns the last non-skipped actor.
:return: the last active actor, None if not available
:rtype: Actor
"""
result = None
for actor in reversed(self.actors):
if not actor.skip:
result = actor
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['actor']]
return result |
def csv_tolist(path_to_file, **kwargs):
"""
Parse the csv file to a list of rows.
"""
result = []
encoding = kwargs.get('encoding', 'utf-8')
delimiter = kwargs.get('delimiter', ',')
dialect = kwargs.get('dialect', csv.excel)
_, _ext = path_to_file.split('.', 1)
try:
file = codecs.open(path_to_file, 'r', encoding)
items_file = io.TextIOWrapper(file, encoding=encoding)
result = list(
csv.reader(items_file, delimiter=delimiter, dialect=dialect))
items_file.close()
file.close()
except Exception as ex:
result = []
logger.error('Fail parsing csv to list of rows - {}'.format(ex))
return result | def function[csv_tolist, parameter[path_to_file]]:
constant[
Parse the csv file to a list of rows.
]
variable[result] assign[=] list[[]]
variable[encoding] assign[=] call[name[kwargs].get, parameter[constant[encoding], constant[utf-8]]]
variable[delimiter] assign[=] call[name[kwargs].get, parameter[constant[delimiter], constant[,]]]
variable[dialect] assign[=] call[name[kwargs].get, parameter[constant[dialect], name[csv].excel]]
<ast.Tuple object at 0x7da20c993fd0> assign[=] call[name[path_to_file].split, parameter[constant[.], constant[1]]]
<ast.Try object at 0x7da20c9922f0>
return[name[result]] | keyword[def] identifier[csv_tolist] ( identifier[path_to_file] ,** identifier[kwargs] ):
literal[string]
identifier[result] =[]
identifier[encoding] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[delimiter] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[dialect] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[csv] . identifier[excel] )
identifier[_] , identifier[_ext] = identifier[path_to_file] . identifier[split] ( literal[string] , literal[int] )
keyword[try] :
identifier[file] = identifier[codecs] . identifier[open] ( identifier[path_to_file] , literal[string] , identifier[encoding] )
identifier[items_file] = identifier[io] . identifier[TextIOWrapper] ( identifier[file] , identifier[encoding] = identifier[encoding] )
identifier[result] = identifier[list] (
identifier[csv] . identifier[reader] ( identifier[items_file] , identifier[delimiter] = identifier[delimiter] , identifier[dialect] = identifier[dialect] ))
identifier[items_file] . identifier[close] ()
identifier[file] . identifier[close] ()
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[result] =[]
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[ex] ))
keyword[return] identifier[result] | def csv_tolist(path_to_file, **kwargs):
"""
Parse the csv file to a list of rows.
"""
result = []
encoding = kwargs.get('encoding', 'utf-8')
delimiter = kwargs.get('delimiter', ',')
dialect = kwargs.get('dialect', csv.excel)
(_, _ext) = path_to_file.split('.', 1)
try:
file = codecs.open(path_to_file, 'r', encoding)
items_file = io.TextIOWrapper(file, encoding=encoding)
result = list(csv.reader(items_file, delimiter=delimiter, dialect=dialect))
items_file.close()
file.close() # depends on [control=['try'], data=[]]
except Exception as ex:
result = []
logger.error('Fail parsing csv to list of rows - {}'.format(ex)) # depends on [control=['except'], data=['ex']]
return result |
def recommendations(self, **kwargs):
"""
Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('recommendations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | def function[recommendations, parameter[self]]:
constant[
Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
]
variable[path] assign[=] call[name[self]._get_id_path, parameter[constant[recommendations]]]
variable[response] assign[=] call[name[self]._GET, parameter[name[path], name[kwargs]]]
call[name[self]._set_attrs_to_values, parameter[name[response]]]
return[name[response]] | keyword[def] identifier[recommendations] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[path] = identifier[self] . identifier[_get_id_path] ( literal[string] )
identifier[response] = identifier[self] . identifier[_GET] ( identifier[path] , identifier[kwargs] )
identifier[self] . identifier[_set_attrs_to_values] ( identifier[response] )
keyword[return] identifier[response] | def recommendations(self, **kwargs):
"""
Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('recommendations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response |
def add_user(self, group, username):
"""
Add a user to the specified LDAP group.
Args:
group: Name of group to update
username: Username of user to add
Raises:
ldap_tools.exceptions.InvalidResult:
Results of the query were invalid. The actual exception raised
inherits from InvalidResult. See #lookup_id for more info.
"""
try:
self.lookup_id(group)
except ldap_tools.exceptions.InvalidResult as err: # pragma: no cover
raise err from None
operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
self.client.modify(self.__distinguished_name(group), operation) | def function[add_user, parameter[self, group, username]]:
constant[
Add a user to the specified LDAP group.
Args:
group: Name of group to update
username: Username of user to add
Raises:
ldap_tools.exceptions.InvalidResult:
Results of the query were invalid. The actual exception raised
inherits from InvalidResult. See #lookup_id for more info.
]
<ast.Try object at 0x7da18bc70820>
variable[operation] assign[=] dictionary[[<ast.Constant object at 0x7da18eb552a0>], [<ast.List object at 0x7da18eb55360>]]
call[name[self].client.modify, parameter[call[name[self].__distinguished_name, parameter[name[group]]], name[operation]]] | keyword[def] identifier[add_user] ( identifier[self] , identifier[group] , identifier[username] ):
literal[string]
keyword[try] :
identifier[self] . identifier[lookup_id] ( identifier[group] )
keyword[except] identifier[ldap_tools] . identifier[exceptions] . identifier[InvalidResult] keyword[as] identifier[err] :
keyword[raise] identifier[err] keyword[from] keyword[None]
identifier[operation] ={ literal[string] :[( identifier[ldap3] . identifier[MODIFY_ADD] ,[ identifier[username] ])]}
identifier[self] . identifier[client] . identifier[modify] ( identifier[self] . identifier[__distinguished_name] ( identifier[group] ), identifier[operation] ) | def add_user(self, group, username):
"""
Add a user to the specified LDAP group.
Args:
group: Name of group to update
username: Username of user to add
Raises:
ldap_tools.exceptions.InvalidResult:
Results of the query were invalid. The actual exception raised
inherits from InvalidResult. See #lookup_id for more info.
"""
try:
self.lookup_id(group) # depends on [control=['try'], data=[]]
except ldap_tools.exceptions.InvalidResult as err: # pragma: no cover
raise err from None # depends on [control=['except'], data=['err']]
operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
self.client.modify(self.__distinguished_name(group), operation) |
def tx_xml(self):
"""
Return the ``<c:tx>`` (tx is short for 'text') element for this
series as unicode text. This element contains the series name.
"""
return self._tx_tmpl.format(**{
'wksht_ref': self._series.name_ref,
'series_name': self.name,
'nsdecls': '',
}) | def function[tx_xml, parameter[self]]:
constant[
Return the ``<c:tx>`` (tx is short for 'text') element for this
series as unicode text. This element contains the series name.
]
return[call[name[self]._tx_tmpl.format, parameter[]]] | keyword[def] identifier[tx_xml] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[_tx_tmpl] . identifier[format] (**{
literal[string] : identifier[self] . identifier[_series] . identifier[name_ref] ,
literal[string] : identifier[self] . identifier[name] ,
literal[string] : literal[string] ,
}) | def tx_xml(self):
"""
Return the ``<c:tx>`` (tx is short for 'text') element for this
series as unicode text. This element contains the series name.
"""
return self._tx_tmpl.format(**{'wksht_ref': self._series.name_ref, 'series_name': self.name, 'nsdecls': ''}) |
def create_item(self, token, name, parent_id, **kwargs):
"""
Create an item to the server.
:param token: A valid token for the user in question.
:type token: string
:param name: The name of the item to be created.
:type name: string
:param parent_id: The id of the destination folder.
:type parent_id: int | long
:param description: (optional) The description text of the item.
:type description: string
:param uuid: (optional) The UUID for the item. It will be generated if
not given.
:type uuid: string
:param privacy: (optional) The privacy state of the item
('Public' or 'Private').
:type privacy: string
:returns: Dictionary containing the details of the created item.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['name'] = name
parameters['parentid'] = parent_id
optional_keys = ['description', 'uuid', 'privacy']
for key in optional_keys:
if key in kwargs:
parameters[key] = kwargs[key]
response = self.request('midas.item.create', parameters)
return response | def function[create_item, parameter[self, token, name, parent_id]]:
constant[
Create an item to the server.
:param token: A valid token for the user in question.
:type token: string
:param name: The name of the item to be created.
:type name: string
:param parent_id: The id of the destination folder.
:type parent_id: int | long
:param description: (optional) The description text of the item.
:type description: string
:param uuid: (optional) The UUID for the item. It will be generated if
not given.
:type uuid: string
:param privacy: (optional) The privacy state of the item
('Public' or 'Private').
:type privacy: string
:returns: Dictionary containing the details of the created item.
:rtype: dict
]
variable[parameters] assign[=] call[name[dict], parameter[]]
call[name[parameters]][constant[token]] assign[=] name[token]
call[name[parameters]][constant[name]] assign[=] name[name]
call[name[parameters]][constant[parentid]] assign[=] name[parent_id]
variable[optional_keys] assign[=] list[[<ast.Constant object at 0x7da1afe71c30>, <ast.Constant object at 0x7da1afe73df0>, <ast.Constant object at 0x7da1afe72830>]]
for taget[name[key]] in starred[name[optional_keys]] begin[:]
if compare[name[key] in name[kwargs]] begin[:]
call[name[parameters]][name[key]] assign[=] call[name[kwargs]][name[key]]
variable[response] assign[=] call[name[self].request, parameter[constant[midas.item.create], name[parameters]]]
return[name[response]] | keyword[def] identifier[create_item] ( identifier[self] , identifier[token] , identifier[name] , identifier[parent_id] ,** identifier[kwargs] ):
literal[string]
identifier[parameters] = identifier[dict] ()
identifier[parameters] [ literal[string] ]= identifier[token]
identifier[parameters] [ literal[string] ]= identifier[name]
identifier[parameters] [ literal[string] ]= identifier[parent_id]
identifier[optional_keys] =[ literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[key] keyword[in] identifier[optional_keys] :
keyword[if] identifier[key] keyword[in] identifier[kwargs] :
identifier[parameters] [ identifier[key] ]= identifier[kwargs] [ identifier[key] ]
identifier[response] = identifier[self] . identifier[request] ( literal[string] , identifier[parameters] )
keyword[return] identifier[response] | def create_item(self, token, name, parent_id, **kwargs):
"""
Create an item to the server.
:param token: A valid token for the user in question.
:type token: string
:param name: The name of the item to be created.
:type name: string
:param parent_id: The id of the destination folder.
:type parent_id: int | long
:param description: (optional) The description text of the item.
:type description: string
:param uuid: (optional) The UUID for the item. It will be generated if
not given.
:type uuid: string
:param privacy: (optional) The privacy state of the item
('Public' or 'Private').
:type privacy: string
:returns: Dictionary containing the details of the created item.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['name'] = name
parameters['parentid'] = parent_id
optional_keys = ['description', 'uuid', 'privacy']
for key in optional_keys:
if key in kwargs:
parameters[key] = kwargs[key] # depends on [control=['if'], data=['key', 'kwargs']] # depends on [control=['for'], data=['key']]
response = self.request('midas.item.create', parameters)
return response |
def get_all_payments_of_incoming(self, incoming_id):
"""
Get all payments of incoming
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param incoming_id: the incoming id
:return: list
"""
return self._iterate_through_pages(
get_function=self.get_payments_of_incoming_per_page,
resource=INCOMING_PAYMENTS,
**{'incoming_id': incoming_id}
) | def function[get_all_payments_of_incoming, parameter[self, incoming_id]]:
constant[
Get all payments of incoming
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param incoming_id: the incoming id
:return: list
]
return[call[name[self]._iterate_through_pages, parameter[]]] | keyword[def] identifier[get_all_payments_of_incoming] ( identifier[self] , identifier[incoming_id] ):
literal[string]
keyword[return] identifier[self] . identifier[_iterate_through_pages] (
identifier[get_function] = identifier[self] . identifier[get_payments_of_incoming_per_page] ,
identifier[resource] = identifier[INCOMING_PAYMENTS] ,
**{ literal[string] : identifier[incoming_id] }
) | def get_all_payments_of_incoming(self, incoming_id):
"""
Get all payments of incoming
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param incoming_id: the incoming id
:return: list
"""
return self._iterate_through_pages(get_function=self.get_payments_of_incoming_per_page, resource=INCOMING_PAYMENTS, **{'incoming_id': incoming_id}) |
def work(self):
"""
Entry point after argument processing.
:return:
"""
self.roca.do_print = True
ret = self.process_inputs()
if self.args.dump:
self.roca.dump(ret)
if self.roca.found > 0:
logger.info('Fingerprinted keys found: %s' % self.roca.found)
logger.info('WARNING: Potential vulnerability')
else:
logger.info('No fingerprinted keys found (OK)') | def function[work, parameter[self]]:
constant[
Entry point after argument processing.
:return:
]
name[self].roca.do_print assign[=] constant[True]
variable[ret] assign[=] call[name[self].process_inputs, parameter[]]
if name[self].args.dump begin[:]
call[name[self].roca.dump, parameter[name[ret]]]
if compare[name[self].roca.found greater[>] constant[0]] begin[:]
call[name[logger].info, parameter[binary_operation[constant[Fingerprinted keys found: %s] <ast.Mod object at 0x7da2590d6920> name[self].roca.found]]]
call[name[logger].info, parameter[constant[WARNING: Potential vulnerability]]] | keyword[def] identifier[work] ( identifier[self] ):
literal[string]
identifier[self] . identifier[roca] . identifier[do_print] = keyword[True]
identifier[ret] = identifier[self] . identifier[process_inputs] ()
keyword[if] identifier[self] . identifier[args] . identifier[dump] :
identifier[self] . identifier[roca] . identifier[dump] ( identifier[ret] )
keyword[if] identifier[self] . identifier[roca] . identifier[found] > literal[int] :
identifier[logger] . identifier[info] ( literal[string] % identifier[self] . identifier[roca] . identifier[found] )
identifier[logger] . identifier[info] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] ) | def work(self):
"""
Entry point after argument processing.
:return:
"""
self.roca.do_print = True
ret = self.process_inputs()
if self.args.dump:
self.roca.dump(ret) # depends on [control=['if'], data=[]]
if self.roca.found > 0:
logger.info('Fingerprinted keys found: %s' % self.roca.found)
logger.info('WARNING: Potential vulnerability') # depends on [control=['if'], data=[]]
else:
logger.info('No fingerprinted keys found (OK)') |
def make_placeholders(seq, start=1):
"""
Generate placeholders for the given sequence.
"""
if len(seq) == 0:
raise ValueError('Sequence must have at least one element.')
param_style = Context.current().param_style
placeholders = None
if isinstance(seq, dict):
if param_style in ('named', 'pyformat'):
template = ':%s' if param_style == 'named' else '%%(%s)s'
placeholders = (template % key
for key in six.iterkeys(seq))
elif isinstance(seq, (list, tuple)):
if param_style == 'numeric':
placeholders = (':%d' % i
for i in xrange(start, start + len(seq)))
elif param_style in ('qmark', 'format', 'pyformat'):
placeholders = itertools.repeat(
'?' if param_style == 'qmark' else '%s',
len(seq))
if placeholders is None:
raise NotSupported(
"Param style '%s' does not support sequence type '%s'" % (
param_style, seq.__class__.__name__))
return ', '.join(placeholders) | def function[make_placeholders, parameter[seq, start]]:
constant[
Generate placeholders for the given sequence.
]
if compare[call[name[len], parameter[name[seq]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b28fe440>
variable[param_style] assign[=] call[name[Context].current, parameter[]].param_style
variable[placeholders] assign[=] constant[None]
if call[name[isinstance], parameter[name[seq], name[dict]]] begin[:]
if compare[name[param_style] in tuple[[<ast.Constant object at 0x7da1b28ff100>, <ast.Constant object at 0x7da1b28fdd80>]]] begin[:]
variable[template] assign[=] <ast.IfExp object at 0x7da1b28ff010>
variable[placeholders] assign[=] <ast.GeneratorExp object at 0x7da1b28fcac0>
if compare[name[placeholders] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c795ed0>
return[call[constant[, ].join, parameter[name[placeholders]]]] | keyword[def] identifier[make_placeholders] ( identifier[seq] , identifier[start] = literal[int] ):
literal[string]
keyword[if] identifier[len] ( identifier[seq] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[param_style] = identifier[Context] . identifier[current] (). identifier[param_style]
identifier[placeholders] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[seq] , identifier[dict] ):
keyword[if] identifier[param_style] keyword[in] ( literal[string] , literal[string] ):
identifier[template] = literal[string] keyword[if] identifier[param_style] == literal[string] keyword[else] literal[string]
identifier[placeholders] =( identifier[template] % identifier[key]
keyword[for] identifier[key] keyword[in] identifier[six] . identifier[iterkeys] ( identifier[seq] ))
keyword[elif] identifier[isinstance] ( identifier[seq] ,( identifier[list] , identifier[tuple] )):
keyword[if] identifier[param_style] == literal[string] :
identifier[placeholders] =( literal[string] % identifier[i]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[start] , identifier[start] + identifier[len] ( identifier[seq] )))
keyword[elif] identifier[param_style] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[placeholders] = identifier[itertools] . identifier[repeat] (
literal[string] keyword[if] identifier[param_style] == literal[string] keyword[else] literal[string] ,
identifier[len] ( identifier[seq] ))
keyword[if] identifier[placeholders] keyword[is] keyword[None] :
keyword[raise] identifier[NotSupported] (
literal[string] %(
identifier[param_style] , identifier[seq] . identifier[__class__] . identifier[__name__] ))
keyword[return] literal[string] . identifier[join] ( identifier[placeholders] ) | def make_placeholders(seq, start=1):
"""
Generate placeholders for the given sequence.
"""
if len(seq) == 0:
raise ValueError('Sequence must have at least one element.') # depends on [control=['if'], data=[]]
param_style = Context.current().param_style
placeholders = None
if isinstance(seq, dict):
if param_style in ('named', 'pyformat'):
template = ':%s' if param_style == 'named' else '%%(%s)s'
placeholders = (template % key for key in six.iterkeys(seq)) # depends on [control=['if'], data=['param_style']] # depends on [control=['if'], data=[]]
elif isinstance(seq, (list, tuple)):
if param_style == 'numeric':
placeholders = (':%d' % i for i in xrange(start, start + len(seq))) # depends on [control=['if'], data=[]]
elif param_style in ('qmark', 'format', 'pyformat'):
placeholders = itertools.repeat('?' if param_style == 'qmark' else '%s', len(seq)) # depends on [control=['if'], data=['param_style']] # depends on [control=['if'], data=[]]
if placeholders is None:
raise NotSupported("Param style '%s' does not support sequence type '%s'" % (param_style, seq.__class__.__name__)) # depends on [control=['if'], data=[]]
return ', '.join(placeholders) |
async def loop(self):
"""Pulse every timeout seconds until stopped."""
while not self.stopped:
self.timeout_handle = self.pyvlx.connection.loop.call_later(
self.timeout_in_seconds, self.loop_timeout)
await self.loop_event.wait()
if not self.stopped:
self.loop_event.clear()
await self.pulse()
self.cancel_loop_timeout()
self.stopped_event.set() | <ast.AsyncFunctionDef object at 0x7da18f58d960> | keyword[async] keyword[def] identifier[loop] ( identifier[self] ):
literal[string]
keyword[while] keyword[not] identifier[self] . identifier[stopped] :
identifier[self] . identifier[timeout_handle] = identifier[self] . identifier[pyvlx] . identifier[connection] . identifier[loop] . identifier[call_later] (
identifier[self] . identifier[timeout_in_seconds] , identifier[self] . identifier[loop_timeout] )
keyword[await] identifier[self] . identifier[loop_event] . identifier[wait] ()
keyword[if] keyword[not] identifier[self] . identifier[stopped] :
identifier[self] . identifier[loop_event] . identifier[clear] ()
keyword[await] identifier[self] . identifier[pulse] ()
identifier[self] . identifier[cancel_loop_timeout] ()
identifier[self] . identifier[stopped_event] . identifier[set] () | async def loop(self):
"""Pulse every timeout seconds until stopped."""
while not self.stopped:
self.timeout_handle = self.pyvlx.connection.loop.call_later(self.timeout_in_seconds, self.loop_timeout)
await self.loop_event.wait()
if not self.stopped:
self.loop_event.clear()
await self.pulse() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
self.cancel_loop_timeout()
self.stopped_event.set() |
def bibitem_as_plaintext(bibitem):
"""
Return a plaintext representation of a bibitem from the ``.bbl`` file.
.. note::
This plaintext representation can be super ugly, contain URLs and so \
on.
.. note::
You need to have ``delatex`` installed system-wide, or to build it in \
this repo, according to the ``README.md`` before using this \
function.
:param bibitem: The text content of the bibitem.
:returns: A cleaned plaintext citation from the bibitem.
"""
try:
output = subprocess.check_output(["delatex",
"-s"],
input=bibitem.encode("utf-8"))
except FileNotFoundError:
script_dir = os.path.dirname(os.path.abspath(__file__))
output = subprocess.check_output(["%s/../external/opendetex/delatex" %
(script_dir,),
"-s"],
input=bibitem.encode("utf-8"))
output = output.decode("utf-8")
output = tools.clean_whitespaces(output)
return output | def function[bibitem_as_plaintext, parameter[bibitem]]:
constant[
Return a plaintext representation of a bibitem from the ``.bbl`` file.
.. note::
This plaintext representation can be super ugly, contain URLs and so on.
.. note::
You need to have ``delatex`` installed system-wide, or to build it in this repo, according to the ``README.md`` before using this function.
:param bibitem: The text content of the bibitem.
:returns: A cleaned plaintext citation from the bibitem.
]
<ast.Try object at 0x7da1b257ef20>
variable[output] assign[=] call[name[output].decode, parameter[constant[utf-8]]]
variable[output] assign[=] call[name[tools].clean_whitespaces, parameter[name[output]]]
return[name[output]] | keyword[def] identifier[bibitem_as_plaintext] ( identifier[bibitem] ):
literal[string]
keyword[try] :
identifier[output] = identifier[subprocess] . identifier[check_output] ([ literal[string] ,
literal[string] ],
identifier[input] = identifier[bibitem] . identifier[encode] ( literal[string] ))
keyword[except] identifier[FileNotFoundError] :
identifier[script_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] ))
identifier[output] = identifier[subprocess] . identifier[check_output] ([ literal[string] %
( identifier[script_dir] ,),
literal[string] ],
identifier[input] = identifier[bibitem] . identifier[encode] ( literal[string] ))
identifier[output] = identifier[output] . identifier[decode] ( literal[string] )
identifier[output] = identifier[tools] . identifier[clean_whitespaces] ( identifier[output] )
keyword[return] identifier[output] | def bibitem_as_plaintext(bibitem):
"""
Return a plaintext representation of a bibitem from the ``.bbl`` file.
.. note::
This plaintext representation can be super ugly, contain URLs and so on.
.. note::
You need to have ``delatex`` installed system-wide, or to build it in this repo, according to the ``README.md`` before using this function.
:param bibitem: The text content of the bibitem.
:returns: A cleaned plaintext citation from the bibitem.
"""
try:
output = subprocess.check_output(['delatex', '-s'], input=bibitem.encode('utf-8')) # depends on [control=['try'], data=[]]
except FileNotFoundError:
script_dir = os.path.dirname(os.path.abspath(__file__))
output = subprocess.check_output(['%s/../external/opendetex/delatex' % (script_dir,), '-s'], input=bibitem.encode('utf-8')) # depends on [control=['except'], data=[]]
output = output.decode('utf-8')
output = tools.clean_whitespaces(output)
return output |
def recover(self, requeue=False):
"""Redeliver unacknowledged messages.
:param bool requeue: Re-queue the messages
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not isinstance(requeue, bool):
raise AMQPInvalidArgument('requeue should be a boolean')
recover_frame = specification.Basic.Recover(requeue=requeue)
return self._channel.rpc_request(recover_frame) | def function[recover, parameter[self, requeue]]:
constant[Redeliver unacknowledged messages.
:param bool requeue: Re-queue the messages
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
]
if <ast.UnaryOp object at 0x7da20c6ab550> begin[:]
<ast.Raise object at 0x7da20c6a96f0>
variable[recover_frame] assign[=] call[name[specification].Basic.Recover, parameter[]]
return[call[name[self]._channel.rpc_request, parameter[name[recover_frame]]]] | keyword[def] identifier[recover] ( identifier[self] , identifier[requeue] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[requeue] , identifier[bool] ):
keyword[raise] identifier[AMQPInvalidArgument] ( literal[string] )
identifier[recover_frame] = identifier[specification] . identifier[Basic] . identifier[Recover] ( identifier[requeue] = identifier[requeue] )
keyword[return] identifier[self] . identifier[_channel] . identifier[rpc_request] ( identifier[recover_frame] ) | def recover(self, requeue=False):
"""Redeliver unacknowledged messages.
:param bool requeue: Re-queue the messages
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not isinstance(requeue, bool):
raise AMQPInvalidArgument('requeue should be a boolean') # depends on [control=['if'], data=[]]
recover_frame = specification.Basic.Recover(requeue=requeue)
return self._channel.rpc_request(recover_frame) |
def present(dbname, name,
owner=None, user=None,
db_user=None, db_password=None,
db_host=None, db_port=None):
'''
Ensure that the named schema is present in the database.
dbname
The database's name will work on
name
The name of the schema to manage
user
system user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
ret = {'dbname': dbname,
'name': name,
'changes': {},
'result': True,
'comment': 'Schema {0} is already present in '
'database {1}'.format(name, dbname)}
db_args = {
'db_user': db_user,
'db_password': db_password,
'db_host': db_host,
'db_port': db_port,
'user': user
}
# check if schema exists
schema_attr = __salt__['postgres.schema_get'](dbname, name, **db_args)
cret = None
# The schema is not present, make it!
if schema_attr is None:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Schema {0} is set to be created' \
' in database {1}.'.format(name, dbname)
return ret
cret = __salt__['postgres.schema_create'](dbname,
name,
owner=owner,
**db_args)
else:
msg = 'Schema {0} already exists in database {1}'
cret = None
if cret:
msg = 'Schema {0} has been created in database {1}'
ret['result'] = True
ret['changes'][name] = 'Present'
elif cret is not None:
msg = 'Failed to create schema {0} in database {1}'
ret['result'] = False
else:
msg = 'Schema {0} already exists in database {1}'
ret['result'] = True
ret['comment'] = msg.format(name, dbname)
return ret | def function[present, parameter[dbname, name, owner, user, db_user, db_password, db_host, db_port]]:
constant[
Ensure that the named schema is present in the database.
dbname
The database's name will work on
name
The name of the schema to manage
user
system user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da204621930>, <ast.Constant object at 0x7da204621510>, <ast.Constant object at 0x7da204622380>, <ast.Constant object at 0x7da204621d20>, <ast.Constant object at 0x7da204621f60>], [<ast.Name object at 0x7da2046224a0>, <ast.Name object at 0x7da2046229e0>, <ast.Dict object at 0x7da204623130>, <ast.Constant object at 0x7da204622cb0>, <ast.Call object at 0x7da204622c20>]]
variable[db_args] assign[=] dictionary[[<ast.Constant object at 0x7da204620400>, <ast.Constant object at 0x7da204620280>, <ast.Constant object at 0x7da204622230>, <ast.Constant object at 0x7da204621960>, <ast.Constant object at 0x7da204623670>], [<ast.Name object at 0x7da204621030>, <ast.Name object at 0x7da204622410>, <ast.Name object at 0x7da2046209a0>, <ast.Name object at 0x7da204623a30>, <ast.Name object at 0x7da204621bd0>]]
variable[schema_attr] assign[=] call[call[name[__salt__]][constant[postgres.schema_get]], parameter[name[dbname], name[name]]]
variable[cret] assign[=] constant[None]
if compare[name[schema_attr] is constant[None]] begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] call[constant[Schema {0} is set to be created in database {1}.].format, parameter[name[name], name[dbname]]]
return[name[ret]]
variable[cret] assign[=] call[call[name[__salt__]][constant[postgres.schema_create]], parameter[name[dbname], name[name]]]
if name[cret] begin[:]
variable[msg] assign[=] constant[Schema {0} has been created in database {1}]
call[name[ret]][constant[result]] assign[=] constant[True]
call[call[name[ret]][constant[changes]]][name[name]] assign[=] constant[Present]
call[name[ret]][constant[comment]] assign[=] call[name[msg].format, parameter[name[name], name[dbname]]]
return[name[ret]] | keyword[def] identifier[present] ( identifier[dbname] , identifier[name] ,
identifier[owner] = keyword[None] , identifier[user] = keyword[None] ,
identifier[db_user] = keyword[None] , identifier[db_password] = keyword[None] ,
identifier[db_host] = keyword[None] , identifier[db_port] = keyword[None] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[dbname] ,
literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string]
literal[string] . identifier[format] ( identifier[name] , identifier[dbname] )}
identifier[db_args] ={
literal[string] : identifier[db_user] ,
literal[string] : identifier[db_password] ,
literal[string] : identifier[db_host] ,
literal[string] : identifier[db_port] ,
literal[string] : identifier[user]
}
identifier[schema_attr] = identifier[__salt__] [ literal[string] ]( identifier[dbname] , identifier[name] ,** identifier[db_args] )
identifier[cret] = keyword[None]
keyword[if] identifier[schema_attr] keyword[is] keyword[None] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= literal[string] literal[string] . identifier[format] ( identifier[name] , identifier[dbname] )
keyword[return] identifier[ret]
identifier[cret] = identifier[__salt__] [ literal[string] ]( identifier[dbname] ,
identifier[name] ,
identifier[owner] = identifier[owner] ,
** identifier[db_args] )
keyword[else] :
identifier[msg] = literal[string]
identifier[cret] = keyword[None]
keyword[if] identifier[cret] :
identifier[msg] = literal[string]
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ][ identifier[name] ]= literal[string]
keyword[elif] identifier[cret] keyword[is] keyword[not] keyword[None] :
identifier[msg] = literal[string]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[else] :
identifier[msg] = literal[string]
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= identifier[msg] . identifier[format] ( identifier[name] , identifier[dbname] )
keyword[return] identifier[ret] | def present(dbname, name, owner=None, user=None, db_user=None, db_password=None, db_host=None, db_port=None):
"""
Ensure that the named schema is present in the database.
dbname
The database's name will work on
name
The name of the schema to manage
user
system user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
"""
ret = {'dbname': dbname, 'name': name, 'changes': {}, 'result': True, 'comment': 'Schema {0} is already present in database {1}'.format(name, dbname)}
db_args = {'db_user': db_user, 'db_password': db_password, 'db_host': db_host, 'db_port': db_port, 'user': user}
# check if schema exists
schema_attr = __salt__['postgres.schema_get'](dbname, name, **db_args)
cret = None
# The schema is not present, make it!
if schema_attr is None:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Schema {0} is set to be created in database {1}.'.format(name, dbname)
return ret # depends on [control=['if'], data=[]]
cret = __salt__['postgres.schema_create'](dbname, name, owner=owner, **db_args) # depends on [control=['if'], data=[]]
else:
msg = 'Schema {0} already exists in database {1}'
cret = None
if cret:
msg = 'Schema {0} has been created in database {1}'
ret['result'] = True
ret['changes'][name] = 'Present' # depends on [control=['if'], data=[]]
elif cret is not None:
msg = 'Failed to create schema {0} in database {1}'
ret['result'] = False # depends on [control=['if'], data=[]]
else:
msg = 'Schema {0} already exists in database {1}'
ret['result'] = True
ret['comment'] = msg.format(name, dbname)
return ret |
def _map(self, event):
"""Extract elements from an operation event and map to a named event."""
description = event.get('description', '')
start_time = google_base.parse_rfc3339_utc_string(
event.get('timestamp', ''))
for name, regex in _EVENT_REGEX_MAP.items():
match = regex.match(description)
if match:
return {'name': name, 'start-time': start_time}, match
return {'name': description, 'start-time': start_time}, None | def function[_map, parameter[self, event]]:
constant[Extract elements from an operation event and map to a named event.]
variable[description] assign[=] call[name[event].get, parameter[constant[description], constant[]]]
variable[start_time] assign[=] call[name[google_base].parse_rfc3339_utc_string, parameter[call[name[event].get, parameter[constant[timestamp], constant[]]]]]
for taget[tuple[[<ast.Name object at 0x7da20c991a50>, <ast.Name object at 0x7da20c991870>]]] in starred[call[name[_EVENT_REGEX_MAP].items, parameter[]]] begin[:]
variable[match] assign[=] call[name[regex].match, parameter[name[description]]]
if name[match] begin[:]
return[tuple[[<ast.Dict object at 0x7da20c992260>, <ast.Name object at 0x7da20c993820>]]]
return[tuple[[<ast.Dict object at 0x7da20c992830>, <ast.Constant object at 0x7da20c993670>]]] | keyword[def] identifier[_map] ( identifier[self] , identifier[event] ):
literal[string]
identifier[description] = identifier[event] . identifier[get] ( literal[string] , literal[string] )
identifier[start_time] = identifier[google_base] . identifier[parse_rfc3339_utc_string] (
identifier[event] . identifier[get] ( literal[string] , literal[string] ))
keyword[for] identifier[name] , identifier[regex] keyword[in] identifier[_EVENT_REGEX_MAP] . identifier[items] ():
identifier[match] = identifier[regex] . identifier[match] ( identifier[description] )
keyword[if] identifier[match] :
keyword[return] { literal[string] : identifier[name] , literal[string] : identifier[start_time] }, identifier[match]
keyword[return] { literal[string] : identifier[description] , literal[string] : identifier[start_time] }, keyword[None] | def _map(self, event):
"""Extract elements from an operation event and map to a named event."""
description = event.get('description', '')
start_time = google_base.parse_rfc3339_utc_string(event.get('timestamp', ''))
for (name, regex) in _EVENT_REGEX_MAP.items():
match = regex.match(description)
if match:
return ({'name': name, 'start-time': start_time}, match) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return ({'name': description, 'start-time': start_time}, None) |
def has_code(state, text, pattern=True, not_typed_msg=None):
"""Test the student code.
Tests if the student typed a (pattern of) text. It is advised to use ``has_equal_ast()`` instead of ``has_code()``,
as it is more robust to small syntactical differences that don't change the code's behavior.
Args:
text (str): the text that is searched for
pattern (bool): if True (the default), the text is treated as a pattern. If False, it is treated as plain text.
not_typed_msg (str): feedback message to be displayed if the student did not type the text.
:Example:
Student code and solution code::
y = 1 + 2 + 3
SCT::
# Verify that student code contains pattern (not robust!!):
Ex().has_code(r"1\\s*\\+2\\s*\\+3")
"""
if not not_typed_msg:
if pattern:
not_typed_msg = "Could not find the correct pattern in your code."
else:
not_typed_msg = "Could not find the following text in your code: %r" % text
student_code = state.student_code
_msg = state.build_message(not_typed_msg)
state.do_test(
StringContainsTest(student_code, text, pattern, Feedback(_msg, state))
)
return state | def function[has_code, parameter[state, text, pattern, not_typed_msg]]:
constant[Test the student code.
Tests if the student typed a (pattern of) text. It is advised to use ``has_equal_ast()`` instead of ``has_code()``,
as it is more robust to small syntactical differences that don't change the code's behavior.
Args:
text (str): the text that is searched for
pattern (bool): if True (the default), the text is treated as a pattern. If False, it is treated as plain text.
not_typed_msg (str): feedback message to be displayed if the student did not type the text.
:Example:
Student code and solution code::
y = 1 + 2 + 3
SCT::
# Verify that student code contains pattern (not robust!!):
Ex().has_code(r"1\s*\+2\s*\+3")
]
if <ast.UnaryOp object at 0x7da1b0395360> begin[:]
if name[pattern] begin[:]
variable[not_typed_msg] assign[=] constant[Could not find the correct pattern in your code.]
variable[student_code] assign[=] name[state].student_code
variable[_msg] assign[=] call[name[state].build_message, parameter[name[not_typed_msg]]]
call[name[state].do_test, parameter[call[name[StringContainsTest], parameter[name[student_code], name[text], name[pattern], call[name[Feedback], parameter[name[_msg], name[state]]]]]]]
return[name[state]] | keyword[def] identifier[has_code] ( identifier[state] , identifier[text] , identifier[pattern] = keyword[True] , identifier[not_typed_msg] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[not_typed_msg] :
keyword[if] identifier[pattern] :
identifier[not_typed_msg] = literal[string]
keyword[else] :
identifier[not_typed_msg] = literal[string] % identifier[text]
identifier[student_code] = identifier[state] . identifier[student_code]
identifier[_msg] = identifier[state] . identifier[build_message] ( identifier[not_typed_msg] )
identifier[state] . identifier[do_test] (
identifier[StringContainsTest] ( identifier[student_code] , identifier[text] , identifier[pattern] , identifier[Feedback] ( identifier[_msg] , identifier[state] ))
)
keyword[return] identifier[state] | def has_code(state, text, pattern=True, not_typed_msg=None):
"""Test the student code.
Tests if the student typed a (pattern of) text. It is advised to use ``has_equal_ast()`` instead of ``has_code()``,
as it is more robust to small syntactical differences that don't change the code's behavior.
Args:
text (str): the text that is searched for
pattern (bool): if True (the default), the text is treated as a pattern. If False, it is treated as plain text.
not_typed_msg (str): feedback message to be displayed if the student did not type the text.
:Example:
Student code and solution code::
y = 1 + 2 + 3
SCT::
# Verify that student code contains pattern (not robust!!):
Ex().has_code(r"1\\s*\\+2\\s*\\+3")
"""
if not not_typed_msg:
if pattern:
not_typed_msg = 'Could not find the correct pattern in your code.' # depends on [control=['if'], data=[]]
else:
not_typed_msg = 'Could not find the following text in your code: %r' % text # depends on [control=['if'], data=[]]
student_code = state.student_code
_msg = state.build_message(not_typed_msg)
state.do_test(StringContainsTest(student_code, text, pattern, Feedback(_msg, state)))
return state |
def dragEnterEvent(self, event):
"""
Listens for query's being dragged and dropped onto this tree.
:param event | <QDragEnterEvent>
"""
data = event.mimeData()
if data.hasFormat('application/x-orb-table') and \
data.hasFormat('application/x-orb-query'):
tableName = self.tableTypeName()
if nstr(data.data('application/x-orb-table')) == tableName:
event.acceptProposedAction()
return
elif data.hasFormat('application/x-orb-records'):
event.acceptProposedAction()
return
super(XOrbRecordBox, self).dragEnterEvent(event) | def function[dragEnterEvent, parameter[self, event]]:
constant[
Listens for query's being dragged and dropped onto this tree.
:param event | <QDragEnterEvent>
]
variable[data] assign[=] call[name[event].mimeData, parameter[]]
if <ast.BoolOp object at 0x7da18c4cd000> begin[:]
variable[tableName] assign[=] call[name[self].tableTypeName, parameter[]]
if compare[call[name[nstr], parameter[call[name[data].data, parameter[constant[application/x-orb-table]]]]] equal[==] name[tableName]] begin[:]
call[name[event].acceptProposedAction, parameter[]]
return[None]
call[call[name[super], parameter[name[XOrbRecordBox], name[self]]].dragEnterEvent, parameter[name[event]]] | keyword[def] identifier[dragEnterEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[data] = identifier[event] . identifier[mimeData] ()
keyword[if] identifier[data] . identifier[hasFormat] ( literal[string] ) keyword[and] identifier[data] . identifier[hasFormat] ( literal[string] ):
identifier[tableName] = identifier[self] . identifier[tableTypeName] ()
keyword[if] identifier[nstr] ( identifier[data] . identifier[data] ( literal[string] ))== identifier[tableName] :
identifier[event] . identifier[acceptProposedAction] ()
keyword[return]
keyword[elif] identifier[data] . identifier[hasFormat] ( literal[string] ):
identifier[event] . identifier[acceptProposedAction] ()
keyword[return]
identifier[super] ( identifier[XOrbRecordBox] , identifier[self] ). identifier[dragEnterEvent] ( identifier[event] ) | def dragEnterEvent(self, event):
"""
Listens for query's being dragged and dropped onto this tree.
:param event | <QDragEnterEvent>
"""
data = event.mimeData()
if data.hasFormat('application/x-orb-table') and data.hasFormat('application/x-orb-query'):
tableName = self.tableTypeName()
if nstr(data.data('application/x-orb-table')) == tableName:
event.acceptProposedAction()
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif data.hasFormat('application/x-orb-records'):
event.acceptProposedAction()
return # depends on [control=['if'], data=[]]
super(XOrbRecordBox, self).dragEnterEvent(event) |
def print_options(self):
""" print description of the component options
"""
summary = []
for opt_name, opt in self.options.items():
if opt.hidden:
continue
summary.append(opt.summary())
print("\n".join(summary)) | def function[print_options, parameter[self]]:
constant[ print description of the component options
]
variable[summary] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c76c2e0>, <ast.Name object at 0x7da20c76ca60>]]] in starred[call[name[self].options.items, parameter[]]] begin[:]
if name[opt].hidden begin[:]
continue
call[name[summary].append, parameter[call[name[opt].summary, parameter[]]]]
call[name[print], parameter[call[constant[
].join, parameter[name[summary]]]]] | keyword[def] identifier[print_options] ( identifier[self] ):
literal[string]
identifier[summary] =[]
keyword[for] identifier[opt_name] , identifier[opt] keyword[in] identifier[self] . identifier[options] . identifier[items] ():
keyword[if] identifier[opt] . identifier[hidden] :
keyword[continue]
identifier[summary] . identifier[append] ( identifier[opt] . identifier[summary] ())
identifier[print] ( literal[string] . identifier[join] ( identifier[summary] )) | def print_options(self):
""" print description of the component options
"""
summary = []
for (opt_name, opt) in self.options.items():
if opt.hidden:
continue # depends on [control=['if'], data=[]]
summary.append(opt.summary()) # depends on [control=['for'], data=[]]
print('\n'.join(summary)) |
def parse_cell(self, cell, coords, cell_mode=CellMode.cooked):
"""Tries to convert the value first to an int, then a float and if neither is
successful it returns the string value.
"""
try:
return int(cell)
except ValueError:
pass
try:
return float(cell)
except ValueError:
pass
# TODO Check for dates?
return cell | def function[parse_cell, parameter[self, cell, coords, cell_mode]]:
constant[Tries to convert the value first to an int, then a float and if neither is
successful it returns the string value.
]
<ast.Try object at 0x7da2044c05b0>
<ast.Try object at 0x7da2044c1330>
return[name[cell]] | keyword[def] identifier[parse_cell] ( identifier[self] , identifier[cell] , identifier[coords] , identifier[cell_mode] = identifier[CellMode] . identifier[cooked] ):
literal[string]
keyword[try] :
keyword[return] identifier[int] ( identifier[cell] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[float] ( identifier[cell] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[cell] | def parse_cell(self, cell, coords, cell_mode=CellMode.cooked):
"""Tries to convert the value first to an int, then a float and if neither is
successful it returns the string value.
"""
try:
return int(cell) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
try:
return float(cell) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
# TODO Check for dates?
return cell |
def _set_link_oam(self, v, load=False):
"""
Setter method for link_oam, mapped from YANG variable /protocol/link_oam (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_oam is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_oam() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=link_oam.link_oam, is_container='container', presence=True, yang_name="link-oam", rest_name="link-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link OAM Protocol configuration mode', u'callpoint': u'setDot3ahEnable', u'sort-priority': u'68', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'config-link-oam'}}, namespace='urn:brocade.com:mgmt:brocade-dot3ah', defining_module='brocade-dot3ah', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_oam must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=link_oam.link_oam, is_container='container', presence=True, yang_name="link-oam", rest_name="link-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link OAM Protocol configuration mode', u'callpoint': u'setDot3ahEnable', u'sort-priority': u'68', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'config-link-oam'}}, namespace='urn:brocade.com:mgmt:brocade-dot3ah', defining_module='brocade-dot3ah', yang_type='container', is_config=True)""",
})
self.__link_oam = t
if hasattr(self, '_set'):
self._set() | def function[_set_link_oam, parameter[self, v, load]]:
constant[
Setter method for link_oam, mapped from YANG variable /protocol/link_oam (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_oam is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_oam() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20c6c6080>
name[self].__link_oam assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_link_oam] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[link_oam] . identifier[link_oam] , identifier[is_container] = literal[string] , identifier[presence] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__link_oam] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_link_oam(self, v, load=False):
"""
Setter method for link_oam, mapped from YANG variable /protocol/link_oam (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_oam is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_oam() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=link_oam.link_oam, is_container='container', presence=True, yang_name='link-oam', rest_name='link-oam', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link OAM Protocol configuration mode', u'callpoint': u'setDot3ahEnable', u'sort-priority': u'68', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'config-link-oam'}}, namespace='urn:brocade.com:mgmt:brocade-dot3ah', defining_module='brocade-dot3ah', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'link_oam must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=link_oam.link_oam, is_container=\'container\', presence=True, yang_name="link-oam", rest_name="link-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Link OAM Protocol configuration mode\', u\'callpoint\': u\'setDot3ahEnable\', u\'sort-priority\': u\'68\', u\'cli-full-command\': None, u\'cli-add-mode\': None, u\'cli-full-no\': None, u\'cli-mode-name\': u\'config-link-oam\'}}, namespace=\'urn:brocade.com:mgmt:brocade-dot3ah\', defining_module=\'brocade-dot3ah\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__link_oam = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def normalTriad(phi, theta):
"""
Calculate the so-called normal triad [p, q, r] which is associated with a spherical coordinate system .
The three vectors are:
p - The unit tangent vector in the direction of increasing longitudinal angle phi.
q - The unit tangent vector in the direction of increasing latitudinal angle theta.
r - The unit vector toward the point (phi, theta).
Parameters
----------
phi - longitude-like angle (e.g., right ascension, ecliptic longitude) in radians
theta - latitide-like angle (e.g., declination, ecliptic latitude) in radians
Returns
-------
The normal triad as the vectors p, q, r
"""
sphi=sin(phi)
stheta=sin(theta)
cphi=cos(phi)
ctheta=cos(theta)
p=array([-sphi, cphi, zeros_like(phi)])
q=array([-stheta*cphi, -stheta*sphi, ctheta])
r=array([ctheta*cphi, ctheta*sphi, stheta])
return p, q, r | def function[normalTriad, parameter[phi, theta]]:
constant[
Calculate the so-called normal triad [p, q, r] which is associated with a spherical coordinate system .
The three vectors are:
p - The unit tangent vector in the direction of increasing longitudinal angle phi.
q - The unit tangent vector in the direction of increasing latitudinal angle theta.
r - The unit vector toward the point (phi, theta).
Parameters
----------
phi - longitude-like angle (e.g., right ascension, ecliptic longitude) in radians
theta - latitide-like angle (e.g., declination, ecliptic latitude) in radians
Returns
-------
The normal triad as the vectors p, q, r
]
variable[sphi] assign[=] call[name[sin], parameter[name[phi]]]
variable[stheta] assign[=] call[name[sin], parameter[name[theta]]]
variable[cphi] assign[=] call[name[cos], parameter[name[phi]]]
variable[ctheta] assign[=] call[name[cos], parameter[name[theta]]]
variable[p] assign[=] call[name[array], parameter[list[[<ast.UnaryOp object at 0x7da18f811c30>, <ast.Name object at 0x7da18f812350>, <ast.Call object at 0x7da18f812950>]]]]
variable[q] assign[=] call[name[array], parameter[list[[<ast.BinOp object at 0x7da18f811f60>, <ast.BinOp object at 0x7da18f811000>, <ast.Name object at 0x7da18f8112a0>]]]]
variable[r] assign[=] call[name[array], parameter[list[[<ast.BinOp object at 0x7da18f810d30>, <ast.BinOp object at 0x7da18f810640>, <ast.Name object at 0x7da18f812050>]]]]
return[tuple[[<ast.Name object at 0x7da18f812560>, <ast.Name object at 0x7da18f810250>, <ast.Name object at 0x7da18f811390>]]] | keyword[def] identifier[normalTriad] ( identifier[phi] , identifier[theta] ):
literal[string]
identifier[sphi] = identifier[sin] ( identifier[phi] )
identifier[stheta] = identifier[sin] ( identifier[theta] )
identifier[cphi] = identifier[cos] ( identifier[phi] )
identifier[ctheta] = identifier[cos] ( identifier[theta] )
identifier[p] = identifier[array] ([- identifier[sphi] , identifier[cphi] , identifier[zeros_like] ( identifier[phi] )])
identifier[q] = identifier[array] ([- identifier[stheta] * identifier[cphi] ,- identifier[stheta] * identifier[sphi] , identifier[ctheta] ])
identifier[r] = identifier[array] ([ identifier[ctheta] * identifier[cphi] , identifier[ctheta] * identifier[sphi] , identifier[stheta] ])
keyword[return] identifier[p] , identifier[q] , identifier[r] | def normalTriad(phi, theta):
"""
Calculate the so-called normal triad [p, q, r] which is associated with a spherical coordinate system .
The three vectors are:
p - The unit tangent vector in the direction of increasing longitudinal angle phi.
q - The unit tangent vector in the direction of increasing latitudinal angle theta.
r - The unit vector toward the point (phi, theta).
Parameters
----------
phi - longitude-like angle (e.g., right ascension, ecliptic longitude) in radians
theta - latitide-like angle (e.g., declination, ecliptic latitude) in radians
Returns
-------
The normal triad as the vectors p, q, r
"""
sphi = sin(phi)
stheta = sin(theta)
cphi = cos(phi)
ctheta = cos(theta)
p = array([-sphi, cphi, zeros_like(phi)])
q = array([-stheta * cphi, -stheta * sphi, ctheta])
r = array([ctheta * cphi, ctheta * sphi, stheta])
return (p, q, r) |
def validate_utf8(alleged_utf8, error = None) :
"alleged_utf8 must be null-terminated bytes."
error, my_error = _get_error(error)
result = dbus.dbus_validate_utf8(alleged_utf8, error._dbobj) != 0
my_error.raise_if_set()
return \
result | def function[validate_utf8, parameter[alleged_utf8, error]]:
constant[alleged_utf8 must be null-terminated bytes.]
<ast.Tuple object at 0x7da20c993700> assign[=] call[name[_get_error], parameter[name[error]]]
variable[result] assign[=] compare[call[name[dbus].dbus_validate_utf8, parameter[name[alleged_utf8], name[error]._dbobj]] not_equal[!=] constant[0]]
call[name[my_error].raise_if_set, parameter[]]
return[name[result]] | keyword[def] identifier[validate_utf8] ( identifier[alleged_utf8] , identifier[error] = keyword[None] ):
literal[string]
identifier[error] , identifier[my_error] = identifier[_get_error] ( identifier[error] )
identifier[result] = identifier[dbus] . identifier[dbus_validate_utf8] ( identifier[alleged_utf8] , identifier[error] . identifier[_dbobj] )!= literal[int]
identifier[my_error] . identifier[raise_if_set] ()
keyword[return] identifier[result] | def validate_utf8(alleged_utf8, error=None):
"""alleged_utf8 must be null-terminated bytes."""
(error, my_error) = _get_error(error)
result = dbus.dbus_validate_utf8(alleged_utf8, error._dbobj) != 0
my_error.raise_if_set()
return result |
def add_include(self, name, module_spec):
"""Adds a module as an included module.
:param name:
Name under which the included module should be exposed in the
current module.
:param module_spec:
ModuleSpec of the included module.
"""
assert name, 'name is required'
assert self.can_include
if name in self.includes:
raise ThriftCompilerError(
'Cannot include module "%s" as "%s" in "%s". '
'The name is already taken.'
% (module_spec.name, name, self.path)
)
self.includes[name] = module_spec
self.scope.add_include(name, module_spec.scope, module_spec.surface) | def function[add_include, parameter[self, name, module_spec]]:
constant[Adds a module as an included module.
:param name:
Name under which the included module should be exposed in the
current module.
:param module_spec:
ModuleSpec of the included module.
]
assert[name[name]]
assert[name[self].can_include]
if compare[name[name] in name[self].includes] begin[:]
<ast.Raise object at 0x7da18fe91000>
call[name[self].includes][name[name]] assign[=] name[module_spec]
call[name[self].scope.add_include, parameter[name[name], name[module_spec].scope, name[module_spec].surface]] | keyword[def] identifier[add_include] ( identifier[self] , identifier[name] , identifier[module_spec] ):
literal[string]
keyword[assert] identifier[name] , literal[string]
keyword[assert] identifier[self] . identifier[can_include]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[includes] :
keyword[raise] identifier[ThriftCompilerError] (
literal[string]
literal[string]
%( identifier[module_spec] . identifier[name] , identifier[name] , identifier[self] . identifier[path] )
)
identifier[self] . identifier[includes] [ identifier[name] ]= identifier[module_spec]
identifier[self] . identifier[scope] . identifier[add_include] ( identifier[name] , identifier[module_spec] . identifier[scope] , identifier[module_spec] . identifier[surface] ) | def add_include(self, name, module_spec):
"""Adds a module as an included module.
:param name:
Name under which the included module should be exposed in the
current module.
:param module_spec:
ModuleSpec of the included module.
"""
assert name, 'name is required'
assert self.can_include
if name in self.includes:
raise ThriftCompilerError('Cannot include module "%s" as "%s" in "%s". The name is already taken.' % (module_spec.name, name, self.path)) # depends on [control=['if'], data=['name']]
self.includes[name] = module_spec
self.scope.add_include(name, module_spec.scope, module_spec.surface) |
def plot(self):
"""
Plots reaction energy as a function of mixing ratio x in
self.c1 - self.c2 tie line using pylab.
Returns:
Pylab object that plots reaction energy as a function of
mixing ratio x.
"""
plt.rcParams['xtick.major.pad'] = '6'
plt.rcParams['ytick.major.pad'] = '6'
plt.rcParams['axes.linewidth'] = 2
npoint = 1000
xs = np.linspace(0, 1, npoint)
# Converts sampling points in self.c1 - self.c2 tie line to those in
# self.comp1 - self.comp2 tie line.
xs_reverse_converted = InterfacialReactivity._reverse_convert(
xs, self.factor1, self.factor2)
energies = [self._get_energy(x) for x in xs_reverse_converted]
plt.plot(xs, energies, 'k-')
# Marks kinks and minimum energy point.
kinks = self.get_kinks()
_, x_kink, energy_kink, _, _ = zip(*kinks)
plt.scatter(x_kink, energy_kink, marker='o', c='blue', s=20)
plt.scatter(self.minimum()[0], self.minimum()[1], marker='*',
c='red', s=300)
# Labels kinks with indices. Labels are made draggable
# in case of overlapping.
for index, x, energy, _, _ in kinks:
plt.annotate(
index,
xy=(x, energy), xytext=(5, 30),
textcoords='offset points', ha='right', va='bottom',
arrowprops=dict(arrowstyle='->',
connectionstyle='arc3,rad=0')).draggable()
plt.xlim([-0.05, 1.05])
if self.norm:
plt.ylabel('Energy (eV/atom)')
else:
plt.ylabel('Energy (eV/f.u.)')
plt.xlabel('$x$ in $x$ {} + $(1-x)$ {}'.format(
self.c1.reduced_formula, self.c2.reduced_formula))
return plt | def function[plot, parameter[self]]:
constant[
Plots reaction energy as a function of mixing ratio x in
self.c1 - self.c2 tie line using pylab.
Returns:
Pylab object that plots reaction energy as a function of
mixing ratio x.
]
call[name[plt].rcParams][constant[xtick.major.pad]] assign[=] constant[6]
call[name[plt].rcParams][constant[ytick.major.pad]] assign[=] constant[6]
call[name[plt].rcParams][constant[axes.linewidth]] assign[=] constant[2]
variable[npoint] assign[=] constant[1000]
variable[xs] assign[=] call[name[np].linspace, parameter[constant[0], constant[1], name[npoint]]]
variable[xs_reverse_converted] assign[=] call[name[InterfacialReactivity]._reverse_convert, parameter[name[xs], name[self].factor1, name[self].factor2]]
variable[energies] assign[=] <ast.ListComp object at 0x7da20c6e7520>
call[name[plt].plot, parameter[name[xs], name[energies], constant[k-]]]
variable[kinks] assign[=] call[name[self].get_kinks, parameter[]]
<ast.Tuple object at 0x7da20c6e5270> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da20c6e5db0>]]
call[name[plt].scatter, parameter[name[x_kink], name[energy_kink]]]
call[name[plt].scatter, parameter[call[call[name[self].minimum, parameter[]]][constant[0]], call[call[name[self].minimum, parameter[]]][constant[1]]]]
for taget[tuple[[<ast.Name object at 0x7da1b26ae6e0>, <ast.Name object at 0x7da1b26ac8b0>, <ast.Name object at 0x7da1b26ae620>, <ast.Name object at 0x7da1b26aded0>, <ast.Name object at 0x7da1b26af7f0>]]] in starred[name[kinks]] begin[:]
call[call[name[plt].annotate, parameter[name[index]]].draggable, parameter[]]
call[name[plt].xlim, parameter[list[[<ast.UnaryOp object at 0x7da1b26aee60>, <ast.Constant object at 0x7da1b26ad0c0>]]]]
if name[self].norm begin[:]
call[name[plt].ylabel, parameter[constant[Energy (eV/atom)]]]
call[name[plt].xlabel, parameter[call[constant[$x$ in $x$ {} + $(1-x)$ {}].format, parameter[name[self].c1.reduced_formula, name[self].c2.reduced_formula]]]]
return[name[plt]] | keyword[def] identifier[plot] ( identifier[self] ):
literal[string]
identifier[plt] . identifier[rcParams] [ literal[string] ]= literal[string]
identifier[plt] . identifier[rcParams] [ literal[string] ]= literal[string]
identifier[plt] . identifier[rcParams] [ literal[string] ]= literal[int]
identifier[npoint] = literal[int]
identifier[xs] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[npoint] )
identifier[xs_reverse_converted] = identifier[InterfacialReactivity] . identifier[_reverse_convert] (
identifier[xs] , identifier[self] . identifier[factor1] , identifier[self] . identifier[factor2] )
identifier[energies] =[ identifier[self] . identifier[_get_energy] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[xs_reverse_converted] ]
identifier[plt] . identifier[plot] ( identifier[xs] , identifier[energies] , literal[string] )
identifier[kinks] = identifier[self] . identifier[get_kinks] ()
identifier[_] , identifier[x_kink] , identifier[energy_kink] , identifier[_] , identifier[_] = identifier[zip] (* identifier[kinks] )
identifier[plt] . identifier[scatter] ( identifier[x_kink] , identifier[energy_kink] , identifier[marker] = literal[string] , identifier[c] = literal[string] , identifier[s] = literal[int] )
identifier[plt] . identifier[scatter] ( identifier[self] . identifier[minimum] ()[ literal[int] ], identifier[self] . identifier[minimum] ()[ literal[int] ], identifier[marker] = literal[string] ,
identifier[c] = literal[string] , identifier[s] = literal[int] )
keyword[for] identifier[index] , identifier[x] , identifier[energy] , identifier[_] , identifier[_] keyword[in] identifier[kinks] :
identifier[plt] . identifier[annotate] (
identifier[index] ,
identifier[xy] =( identifier[x] , identifier[energy] ), identifier[xytext] =( literal[int] , literal[int] ),
identifier[textcoords] = literal[string] , identifier[ha] = literal[string] , identifier[va] = literal[string] ,
identifier[arrowprops] = identifier[dict] ( identifier[arrowstyle] = literal[string] ,
identifier[connectionstyle] = literal[string] )). identifier[draggable] ()
identifier[plt] . identifier[xlim] ([- literal[int] , literal[int] ])
keyword[if] identifier[self] . identifier[norm] :
identifier[plt] . identifier[ylabel] ( literal[string] )
keyword[else] :
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] . identifier[format] (
identifier[self] . identifier[c1] . identifier[reduced_formula] , identifier[self] . identifier[c2] . identifier[reduced_formula] ))
keyword[return] identifier[plt] | def plot(self):
"""
Plots reaction energy as a function of mixing ratio x in
self.c1 - self.c2 tie line using pylab.
Returns:
Pylab object that plots reaction energy as a function of
mixing ratio x.
"""
plt.rcParams['xtick.major.pad'] = '6'
plt.rcParams['ytick.major.pad'] = '6'
plt.rcParams['axes.linewidth'] = 2
npoint = 1000
xs = np.linspace(0, 1, npoint)
# Converts sampling points in self.c1 - self.c2 tie line to those in
# self.comp1 - self.comp2 tie line.
xs_reverse_converted = InterfacialReactivity._reverse_convert(xs, self.factor1, self.factor2)
energies = [self._get_energy(x) for x in xs_reverse_converted]
plt.plot(xs, energies, 'k-')
# Marks kinks and minimum energy point.
kinks = self.get_kinks()
(_, x_kink, energy_kink, _, _) = zip(*kinks)
plt.scatter(x_kink, energy_kink, marker='o', c='blue', s=20)
plt.scatter(self.minimum()[0], self.minimum()[1], marker='*', c='red', s=300)
# Labels kinks with indices. Labels are made draggable
# in case of overlapping.
for (index, x, energy, _, _) in kinks:
plt.annotate(index, xy=(x, energy), xytext=(5, 30), textcoords='offset points', ha='right', va='bottom', arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')).draggable() # depends on [control=['for'], data=[]]
plt.xlim([-0.05, 1.05])
if self.norm:
plt.ylabel('Energy (eV/atom)') # depends on [control=['if'], data=[]]
else:
plt.ylabel('Energy (eV/f.u.)')
plt.xlabel('$x$ in $x$ {} + $(1-x)$ {}'.format(self.c1.reduced_formula, self.c2.reduced_formula))
return plt |
def andrew(S):
"""Convex hull by Andrew
:param S: list of points as coordinate pairs
:requires: S has at least 2 points
:returns: list of points of the convex hull
:complexity: `O(n log n)`
"""
S.sort()
top = []
bot = []
for p in S:
while len(top) >= 2 and not left_turn(p, top[-1], top[-2]):
top.pop()
top.append(p)
while len(bot) >= 2 and not left_turn(bot[-2], bot[-1], p):
bot.pop()
bot.append(p)
return bot[:-1] + top[:0:-1] | def function[andrew, parameter[S]]:
constant[Convex hull by Andrew
:param S: list of points as coordinate pairs
:requires: S has at least 2 points
:returns: list of points of the convex hull
:complexity: `O(n log n)`
]
call[name[S].sort, parameter[]]
variable[top] assign[=] list[[]]
variable[bot] assign[=] list[[]]
for taget[name[p]] in starred[name[S]] begin[:]
while <ast.BoolOp object at 0x7da1b06a3220> begin[:]
call[name[top].pop, parameter[]]
call[name[top].append, parameter[name[p]]]
while <ast.BoolOp object at 0x7da20c992830> begin[:]
call[name[bot].pop, parameter[]]
call[name[bot].append, parameter[name[p]]]
return[binary_operation[call[name[bot]][<ast.Slice object at 0x7da18bccb220>] + call[name[top]][<ast.Slice object at 0x7da18bccbc10>]]] | keyword[def] identifier[andrew] ( identifier[S] ):
literal[string]
identifier[S] . identifier[sort] ()
identifier[top] =[]
identifier[bot] =[]
keyword[for] identifier[p] keyword[in] identifier[S] :
keyword[while] identifier[len] ( identifier[top] )>= literal[int] keyword[and] keyword[not] identifier[left_turn] ( identifier[p] , identifier[top] [- literal[int] ], identifier[top] [- literal[int] ]):
identifier[top] . identifier[pop] ()
identifier[top] . identifier[append] ( identifier[p] )
keyword[while] identifier[len] ( identifier[bot] )>= literal[int] keyword[and] keyword[not] identifier[left_turn] ( identifier[bot] [- literal[int] ], identifier[bot] [- literal[int] ], identifier[p] ):
identifier[bot] . identifier[pop] ()
identifier[bot] . identifier[append] ( identifier[p] )
keyword[return] identifier[bot] [:- literal[int] ]+ identifier[top] [: literal[int] :- literal[int] ] | def andrew(S):
"""Convex hull by Andrew
:param S: list of points as coordinate pairs
:requires: S has at least 2 points
:returns: list of points of the convex hull
:complexity: `O(n log n)`
"""
S.sort()
top = []
bot = []
for p in S:
while len(top) >= 2 and (not left_turn(p, top[-1], top[-2])):
top.pop() # depends on [control=['while'], data=[]]
top.append(p)
while len(bot) >= 2 and (not left_turn(bot[-2], bot[-1], p)):
bot.pop() # depends on [control=['while'], data=[]]
bot.append(p) # depends on [control=['for'], data=['p']]
return bot[:-1] + top[:0:-1] |
def create_node(self, bank, tags=None):
"""
Set up a CondorDagmanNode class to run splitbank code
Parameters
----------
bank : pycbc.workflow.core.File
The File containing the template bank to be split
Returns
--------
node : pycbc.workflow.core.Node
The node to run the job
"""
if tags is None:
tags = []
node = Node(self)
node.add_input_opt('--bank-file', bank)
# Get the output (taken from inspiral.py)
out_files = FileList([])
for i in range( 0, self.num_banks):
curr_tag = 'bank%d' %(i)
# FIXME: What should the tags actually be? The job.tags values are
# currently ignored.
curr_tags = bank.tags + [curr_tag] + tags
job_tag = bank.description + "_" + self.name.upper()
out_file = File(bank.ifo_list, job_tag, bank.segment,
extension=self.extension, directory=self.out_dir,
tags=curr_tags, store_file=self.retain_files)
out_files.append(out_file)
node.add_output_list_opt('--output-filenames', out_files)
return node | def function[create_node, parameter[self, bank, tags]]:
constant[
Set up a CondorDagmanNode class to run splitbank code
Parameters
----------
bank : pycbc.workflow.core.File
The File containing the template bank to be split
Returns
--------
node : pycbc.workflow.core.Node
The node to run the job
]
if compare[name[tags] is constant[None]] begin[:]
variable[tags] assign[=] list[[]]
variable[node] assign[=] call[name[Node], parameter[name[self]]]
call[name[node].add_input_opt, parameter[constant[--bank-file], name[bank]]]
variable[out_files] assign[=] call[name[FileList], parameter[list[[]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[self].num_banks]]] begin[:]
variable[curr_tag] assign[=] binary_operation[constant[bank%d] <ast.Mod object at 0x7da2590d6920> name[i]]
variable[curr_tags] assign[=] binary_operation[binary_operation[name[bank].tags + list[[<ast.Name object at 0x7da18dc06e00>]]] + name[tags]]
variable[job_tag] assign[=] binary_operation[binary_operation[name[bank].description + constant[_]] + call[name[self].name.upper, parameter[]]]
variable[out_file] assign[=] call[name[File], parameter[name[bank].ifo_list, name[job_tag], name[bank].segment]]
call[name[out_files].append, parameter[name[out_file]]]
call[name[node].add_output_list_opt, parameter[constant[--output-filenames], name[out_files]]]
return[name[node]] | keyword[def] identifier[create_node] ( identifier[self] , identifier[bank] , identifier[tags] = keyword[None] ):
literal[string]
keyword[if] identifier[tags] keyword[is] keyword[None] :
identifier[tags] =[]
identifier[node] = identifier[Node] ( identifier[self] )
identifier[node] . identifier[add_input_opt] ( literal[string] , identifier[bank] )
identifier[out_files] = identifier[FileList] ([])
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[num_banks] ):
identifier[curr_tag] = literal[string] %( identifier[i] )
identifier[curr_tags] = identifier[bank] . identifier[tags] +[ identifier[curr_tag] ]+ identifier[tags]
identifier[job_tag] = identifier[bank] . identifier[description] + literal[string] + identifier[self] . identifier[name] . identifier[upper] ()
identifier[out_file] = identifier[File] ( identifier[bank] . identifier[ifo_list] , identifier[job_tag] , identifier[bank] . identifier[segment] ,
identifier[extension] = identifier[self] . identifier[extension] , identifier[directory] = identifier[self] . identifier[out_dir] ,
identifier[tags] = identifier[curr_tags] , identifier[store_file] = identifier[self] . identifier[retain_files] )
identifier[out_files] . identifier[append] ( identifier[out_file] )
identifier[node] . identifier[add_output_list_opt] ( literal[string] , identifier[out_files] )
keyword[return] identifier[node] | def create_node(self, bank, tags=None):
"""
Set up a CondorDagmanNode class to run splitbank code
Parameters
----------
bank : pycbc.workflow.core.File
The File containing the template bank to be split
Returns
--------
node : pycbc.workflow.core.Node
The node to run the job
"""
if tags is None:
tags = [] # depends on [control=['if'], data=['tags']]
node = Node(self)
node.add_input_opt('--bank-file', bank)
# Get the output (taken from inspiral.py)
out_files = FileList([])
for i in range(0, self.num_banks):
curr_tag = 'bank%d' % i
# FIXME: What should the tags actually be? The job.tags values are
# currently ignored.
curr_tags = bank.tags + [curr_tag] + tags
job_tag = bank.description + '_' + self.name.upper()
out_file = File(bank.ifo_list, job_tag, bank.segment, extension=self.extension, directory=self.out_dir, tags=curr_tags, store_file=self.retain_files)
out_files.append(out_file) # depends on [control=['for'], data=['i']]
node.add_output_list_opt('--output-filenames', out_files)
return node |
def distinctBy(iterable, fn):
"""
uniq operation with key selector
"""
s = set()
for i in iterable:
r = fn(i)
if r not in s:
s.add(r)
yield i | def function[distinctBy, parameter[iterable, fn]]:
constant[
uniq operation with key selector
]
variable[s] assign[=] call[name[set], parameter[]]
for taget[name[i]] in starred[name[iterable]] begin[:]
variable[r] assign[=] call[name[fn], parameter[name[i]]]
if compare[name[r] <ast.NotIn object at 0x7da2590d7190> name[s]] begin[:]
call[name[s].add, parameter[name[r]]]
<ast.Yield object at 0x7da1b05bf8e0> | keyword[def] identifier[distinctBy] ( identifier[iterable] , identifier[fn] ):
literal[string]
identifier[s] = identifier[set] ()
keyword[for] identifier[i] keyword[in] identifier[iterable] :
identifier[r] = identifier[fn] ( identifier[i] )
keyword[if] identifier[r] keyword[not] keyword[in] identifier[s] :
identifier[s] . identifier[add] ( identifier[r] )
keyword[yield] identifier[i] | def distinctBy(iterable, fn):
"""
uniq operation with key selector
"""
s = set()
for i in iterable:
r = fn(i)
if r not in s:
s.add(r)
yield i # depends on [control=['if'], data=['r', 's']] # depends on [control=['for'], data=['i']] |
def __args_check(self, envelope, target, modification_code):
""" Method checks arguments, that are specified to the
:meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode`
methods
:param envelope: same as envelope in :meth:`.WMessengerFixedModificationLayer.encode` and \
:meth:`.WMessengerFixedModificationLayer.decode` methods
:param target: same as target in :meth:`.WMessengerFixedModificationLayer.encode` and \
:meth:`.WMessengerFixedModificationLayer.decode` methods
:param modification_code: same as modification_code in \
:meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode` \
methods
:return: None
"""
if target is None:
raise RuntimeError('"target" argument must be specified for this layer')
if modification_code is None:
raise RuntimeError('"modification_code" argument must be specified for this layer')
if isinstance(target, WMessengerFixedModificationLayer.Target) is False:
raise TypeError('Invalid "target" argument')
if isinstance(envelope, WMessengerTextEnvelope) is True:
if isinstance(modification_code, str) is False:
raise TypeError('Invalid "modification_code" argument for specified envelope')
elif isinstance(modification_code, bytes) is False:
raise TypeError('Invalid "modification_code" argument for specified envelope') | def function[__args_check, parameter[self, envelope, target, modification_code]]:
constant[ Method checks arguments, that are specified to the
:meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode`
methods
:param envelope: same as envelope in :meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode` methods
:param target: same as target in :meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode` methods
:param modification_code: same as modification_code in :meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode` methods
:return: None
]
if compare[name[target] is constant[None]] begin[:]
<ast.Raise object at 0x7da18ede6b00>
if compare[name[modification_code] is constant[None]] begin[:]
<ast.Raise object at 0x7da18ede7eb0>
if compare[call[name[isinstance], parameter[name[target], name[WMessengerFixedModificationLayer].Target]] is constant[False]] begin[:]
<ast.Raise object at 0x7da18ede7940>
if compare[call[name[isinstance], parameter[name[envelope], name[WMessengerTextEnvelope]]] is constant[True]] begin[:]
if compare[call[name[isinstance], parameter[name[modification_code], name[str]]] is constant[False]] begin[:]
<ast.Raise object at 0x7da18ede52a0> | keyword[def] identifier[__args_check] ( identifier[self] , identifier[envelope] , identifier[target] , identifier[modification_code] ):
literal[string]
keyword[if] identifier[target] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[modification_code] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[target] , identifier[WMessengerFixedModificationLayer] . identifier[Target] ) keyword[is] keyword[False] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[envelope] , identifier[WMessengerTextEnvelope] ) keyword[is] keyword[True] :
keyword[if] identifier[isinstance] ( identifier[modification_code] , identifier[str] ) keyword[is] keyword[False] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[modification_code] , identifier[bytes] ) keyword[is] keyword[False] :
keyword[raise] identifier[TypeError] ( literal[string] ) | def __args_check(self, envelope, target, modification_code):
""" Method checks arguments, that are specified to the
:meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode`
methods
:param envelope: same as envelope in :meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode` methods
:param target: same as target in :meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode` methods
:param modification_code: same as modification_code in :meth:`.WMessengerFixedModificationLayer.encode` and :meth:`.WMessengerFixedModificationLayer.decode` methods
:return: None
"""
if target is None:
raise RuntimeError('"target" argument must be specified for this layer') # depends on [control=['if'], data=[]]
if modification_code is None:
raise RuntimeError('"modification_code" argument must be specified for this layer') # depends on [control=['if'], data=[]]
if isinstance(target, WMessengerFixedModificationLayer.Target) is False:
raise TypeError('Invalid "target" argument') # depends on [control=['if'], data=[]]
if isinstance(envelope, WMessengerTextEnvelope) is True:
if isinstance(modification_code, str) is False:
raise TypeError('Invalid "modification_code" argument for specified envelope') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(modification_code, bytes) is False:
raise TypeError('Invalid "modification_code" argument for specified envelope') # depends on [control=['if'], data=[]] |
def is_directory(self, path):
"""Return True if <path> is a directory, False if it's NOT a directory"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-d', self._full_hdfs_path(path)]) == 0 | def function[is_directory, parameter[self, path]]:
constant[Return True if <path> is a directory, False if it's NOT a directory]
return[compare[call[name[self]._getReturnCodeCmd, parameter[list[[<ast.Attribute object at 0x7da1b08faa10>, <ast.Constant object at 0x7da1b08fab90>, <ast.Constant object at 0x7da1b08faaa0>, <ast.Constant object at 0x7da1b08faad0>, <ast.Call object at 0x7da1b08faa40>]]]] equal[==] constant[0]]] | keyword[def] identifier[is_directory] ( identifier[self] , identifier[path] ):
literal[string]
keyword[return] identifier[self] . identifier[_getReturnCodeCmd] ([ identifier[self] . identifier[_hadoop_cmd] , literal[string] , literal[string] , literal[string] , identifier[self] . identifier[_full_hdfs_path] ( identifier[path] )])== literal[int] | def is_directory(self, path):
"""Return True if <path> is a directory, False if it's NOT a directory"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-d', self._full_hdfs_path(path)]) == 0 |
def typify(self, stack: Stack) -> List[Stack]:
"""
Appends a typing stack after the given stack, but only if required
(aka don't have two typing layers following each other).
"""
if len(stack.layers) == 1 and isinstance(stack.layers[0], lyr.Typing):
return [stack]
return [stack, Stack([lyr.Typing()])] | def function[typify, parameter[self, stack]]:
constant[
Appends a typing stack after the given stack, but only if required
(aka don't have two typing layers following each other).
]
if <ast.BoolOp object at 0x7da18dc99420> begin[:]
return[list[[<ast.Name object at 0x7da18dc9b280>]]]
return[list[[<ast.Name object at 0x7da18dc9a470>, <ast.Call object at 0x7da18dc9a050>]]] | keyword[def] identifier[typify] ( identifier[self] , identifier[stack] : identifier[Stack] )-> identifier[List] [ identifier[Stack] ]:
literal[string]
keyword[if] identifier[len] ( identifier[stack] . identifier[layers] )== literal[int] keyword[and] identifier[isinstance] ( identifier[stack] . identifier[layers] [ literal[int] ], identifier[lyr] . identifier[Typing] ):
keyword[return] [ identifier[stack] ]
keyword[return] [ identifier[stack] , identifier[Stack] ([ identifier[lyr] . identifier[Typing] ()])] | def typify(self, stack: Stack) -> List[Stack]:
"""
Appends a typing stack after the given stack, but only if required
(aka don't have two typing layers following each other).
"""
if len(stack.layers) == 1 and isinstance(stack.layers[0], lyr.Typing):
return [stack] # depends on [control=['if'], data=[]]
return [stack, Stack([lyr.Typing()])] |
def _read_response(self, response):
"""
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Security+Configuration+JSON
"""
self.name = response['name']
self.description = response['description']
self.autoJoin = response['autoJoin']
self.realm = response['realm']
self.realmAttributes = response.get('realmAttributes', None) | def function[_read_response, parameter[self, response]]:
constant[
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Security+Configuration+JSON
]
name[self].name assign[=] call[name[response]][constant[name]]
name[self].description assign[=] call[name[response]][constant[description]]
name[self].autoJoin assign[=] call[name[response]][constant[autoJoin]]
name[self].realm assign[=] call[name[response]][constant[realm]]
name[self].realmAttributes assign[=] call[name[response].get, parameter[constant[realmAttributes], constant[None]]] | keyword[def] identifier[_read_response] ( identifier[self] , identifier[response] ):
literal[string]
identifier[self] . identifier[name] = identifier[response] [ literal[string] ]
identifier[self] . identifier[description] = identifier[response] [ literal[string] ]
identifier[self] . identifier[autoJoin] = identifier[response] [ literal[string] ]
identifier[self] . identifier[realm] = identifier[response] [ literal[string] ]
identifier[self] . identifier[realmAttributes] = identifier[response] . identifier[get] ( literal[string] , keyword[None] ) | def _read_response(self, response):
"""
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Security+Configuration+JSON
"""
self.name = response['name']
self.description = response['description']
self.autoJoin = response['autoJoin']
self.realm = response['realm']
self.realmAttributes = response.get('realmAttributes', None) |
def parse_meta(self, meta):
"""Parses the meta field in the message, copies it's keys into a new
dict and replaces the values, which should be attribute paths relative
to the passed in object, with the current value at the end of that
path. This function will run recursively when it encounters other dicts
inside the meta dict.
Args:
meta (dict):
The dictionary of mappings to pull structure of the meta from.
Returns:
dict:
A copy of the keys from the meta dict with the values pulled
from the paths.
"""
res = {}
for key, val in meta.items():
if not val:
continue
elif isinstance(val, dict):
res[key] = self.parse_meta(val)
elif val.startswith('current_user.'):
res[key] = self.get_path_attribute(current_user, val)
elif val.startswith('original.'):
res[key] = self.get_path_attribute(self.get_original(), val)
else:
res[key] = self.get_path_attribute(self, val)
return res | def function[parse_meta, parameter[self, meta]]:
constant[Parses the meta field in the message, copies it's keys into a new
dict and replaces the values, which should be attribute paths relative
to the passed in object, with the current value at the end of that
path. This function will run recursively when it encounters other dicts
inside the meta dict.
Args:
meta (dict):
The dictionary of mappings to pull structure of the meta from.
Returns:
dict:
A copy of the keys from the meta dict with the values pulled
from the paths.
]
variable[res] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18f720d90>, <ast.Name object at 0x7da18f7232e0>]]] in starred[call[name[meta].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18f720be0> begin[:]
continue
return[name[res]] | keyword[def] identifier[parse_meta] ( identifier[self] , identifier[meta] ):
literal[string]
identifier[res] ={}
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[meta] . identifier[items] ():
keyword[if] keyword[not] identifier[val] :
keyword[continue]
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[dict] ):
identifier[res] [ identifier[key] ]= identifier[self] . identifier[parse_meta] ( identifier[val] )
keyword[elif] identifier[val] . identifier[startswith] ( literal[string] ):
identifier[res] [ identifier[key] ]= identifier[self] . identifier[get_path_attribute] ( identifier[current_user] , identifier[val] )
keyword[elif] identifier[val] . identifier[startswith] ( literal[string] ):
identifier[res] [ identifier[key] ]= identifier[self] . identifier[get_path_attribute] ( identifier[self] . identifier[get_original] (), identifier[val] )
keyword[else] :
identifier[res] [ identifier[key] ]= identifier[self] . identifier[get_path_attribute] ( identifier[self] , identifier[val] )
keyword[return] identifier[res] | def parse_meta(self, meta):
"""Parses the meta field in the message, copies it's keys into a new
dict and replaces the values, which should be attribute paths relative
to the passed in object, with the current value at the end of that
path. This function will run recursively when it encounters other dicts
inside the meta dict.
Args:
meta (dict):
The dictionary of mappings to pull structure of the meta from.
Returns:
dict:
A copy of the keys from the meta dict with the values pulled
from the paths.
"""
res = {}
for (key, val) in meta.items():
if not val:
continue # depends on [control=['if'], data=[]]
elif isinstance(val, dict):
res[key] = self.parse_meta(val) # depends on [control=['if'], data=[]]
elif val.startswith('current_user.'):
res[key] = self.get_path_attribute(current_user, val) # depends on [control=['if'], data=[]]
elif val.startswith('original.'):
res[key] = self.get_path_attribute(self.get_original(), val) # depends on [control=['if'], data=[]]
else:
res[key] = self.get_path_attribute(self, val) # depends on [control=['for'], data=[]]
return res |
def create_archive_dir(self):
"""
Create the archive dir
"""
archive_dir = os.path.join(self.tmp_dir, self.archive_name)
os.makedirs(archive_dir, 0o700)
return archive_dir | def function[create_archive_dir, parameter[self]]:
constant[
Create the archive dir
]
variable[archive_dir] assign[=] call[name[os].path.join, parameter[name[self].tmp_dir, name[self].archive_name]]
call[name[os].makedirs, parameter[name[archive_dir], constant[448]]]
return[name[archive_dir]] | keyword[def] identifier[create_archive_dir] ( identifier[self] ):
literal[string]
identifier[archive_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[tmp_dir] , identifier[self] . identifier[archive_name] )
identifier[os] . identifier[makedirs] ( identifier[archive_dir] , literal[int] )
keyword[return] identifier[archive_dir] | def create_archive_dir(self):
"""
Create the archive dir
"""
archive_dir = os.path.join(self.tmp_dir, self.archive_name)
os.makedirs(archive_dir, 448)
return archive_dir |
def supports(self, config, context):
"""
Check whether we are in a Flask request context.
:param config: honeybadger configuration.
:param context: current honeybadger configuration.
:return: True if this is a django request, False else.
"""
try:
from flask import request
except ImportError:
return False
else:
return bool(request) | def function[supports, parameter[self, config, context]]:
constant[
Check whether we are in a Flask request context.
:param config: honeybadger configuration.
:param context: current honeybadger configuration.
:return: True if this is a django request, False else.
]
<ast.Try object at 0x7da18ede74c0> | keyword[def] identifier[supports] ( identifier[self] , identifier[config] , identifier[context] ):
literal[string]
keyword[try] :
keyword[from] identifier[flask] keyword[import] identifier[request]
keyword[except] identifier[ImportError] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[bool] ( identifier[request] ) | def supports(self, config, context):
"""
Check whether we are in a Flask request context.
:param config: honeybadger configuration.
:param context: current honeybadger configuration.
:return: True if this is a django request, False else.
"""
try:
from flask import request # depends on [control=['try'], data=[]]
except ImportError:
return False # depends on [control=['except'], data=[]]
else:
return bool(request) |
def __remove_pyc_pyo(fname):
"""Eventually remove .pyc and .pyo files associated to a Python script"""
if osp.splitext(fname)[1] == '.py':
for ending in ('c', 'o'):
if osp.exists(fname+ending):
os.remove(fname+ending) | def function[__remove_pyc_pyo, parameter[fname]]:
constant[Eventually remove .pyc and .pyo files associated to a Python script]
if compare[call[call[name[osp].splitext, parameter[name[fname]]]][constant[1]] equal[==] constant[.py]] begin[:]
for taget[name[ending]] in starred[tuple[[<ast.Constant object at 0x7da18f7221a0>, <ast.Constant object at 0x7da18f7222f0>]]] begin[:]
if call[name[osp].exists, parameter[binary_operation[name[fname] + name[ending]]]] begin[:]
call[name[os].remove, parameter[binary_operation[name[fname] + name[ending]]]] | keyword[def] identifier[__remove_pyc_pyo] ( identifier[fname] ):
literal[string]
keyword[if] identifier[osp] . identifier[splitext] ( identifier[fname] )[ literal[int] ]== literal[string] :
keyword[for] identifier[ending] keyword[in] ( literal[string] , literal[string] ):
keyword[if] identifier[osp] . identifier[exists] ( identifier[fname] + identifier[ending] ):
identifier[os] . identifier[remove] ( identifier[fname] + identifier[ending] ) | def __remove_pyc_pyo(fname):
"""Eventually remove .pyc and .pyo files associated to a Python script"""
if osp.splitext(fname)[1] == '.py':
for ending in ('c', 'o'):
if osp.exists(fname + ending):
os.remove(fname + ending) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ending']] # depends on [control=['if'], data=[]] |
def whoami(*args, **kwargs):
"""
Prints information about the current user.
Assumes the user is already logged-in.
"""
user = client.whoami()
if user:
print_user(user)
else:
print('You are not logged-in.') | def function[whoami, parameter[]]:
constant[
Prints information about the current user.
Assumes the user is already logged-in.
]
variable[user] assign[=] call[name[client].whoami, parameter[]]
if name[user] begin[:]
call[name[print_user], parameter[name[user]]] | keyword[def] identifier[whoami] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[user] = identifier[client] . identifier[whoami] ()
keyword[if] identifier[user] :
identifier[print_user] ( identifier[user] )
keyword[else] :
identifier[print] ( literal[string] ) | def whoami(*args, **kwargs):
"""
Prints information about the current user.
Assumes the user is already logged-in.
"""
user = client.whoami()
if user:
print_user(user) # depends on [control=['if'], data=[]]
else:
print('You are not logged-in.') |
def unique_row(array, use_columns=None, selected_columns_only=False):
'''Takes a numpy array and returns the array reduced to unique rows. If columns are defined only these columns are taken to define a unique row.
The returned array can have all columns of the original array or only the columns defined in use_columns.
Parameters
----------
array : numpy.ndarray
use_columns : list
Index of columns to be used to define a unique row
selected_columns_only : bool
If true only the columns defined in use_columns are returned
Returns
-------
numpy.ndarray
'''
if array.dtype.names is None: # normal array has no named dtype
if use_columns is not None:
a_cut = array[:, use_columns]
else:
a_cut = array
if len(use_columns) > 1:
b = np.ascontiguousarray(a_cut).view(np.dtype((np.void, a_cut.dtype.itemsize * a_cut.shape[1])))
else:
b = np.ascontiguousarray(a_cut)
_, index = np.unique(b, return_index=True)
if not selected_columns_only:
return array[np.sort(index)] # sort to preserve order
else:
return a_cut[np.sort(index)] # sort to preserve order
else: # names for dtype founnd --> array is recarray
names = list(array.dtype.names)
if use_columns is not None:
new_names = [names[i] for i in use_columns]
else:
new_names = names
a_cut, index = np.unique(array[new_names], return_index=True)
if not selected_columns_only:
return array[np.sort(index)] # sort to preserve order
else:
return array[np.sort(index)][new_names] | def function[unique_row, parameter[array, use_columns, selected_columns_only]]:
constant[Takes a numpy array and returns the array reduced to unique rows. If columns are defined only these columns are taken to define a unique row.
The returned array can have all columns of the original array or only the columns defined in use_columns.
Parameters
----------
array : numpy.ndarray
use_columns : list
Index of columns to be used to define a unique row
selected_columns_only : bool
If true only the columns defined in use_columns are returned
Returns
-------
numpy.ndarray
]
if compare[name[array].dtype.names is constant[None]] begin[:]
if compare[name[use_columns] is_not constant[None]] begin[:]
variable[a_cut] assign[=] call[name[array]][tuple[[<ast.Slice object at 0x7da1b11a3af0>, <ast.Name object at 0x7da1b11a3ac0>]]]
if compare[call[name[len], parameter[name[use_columns]]] greater[>] constant[1]] begin[:]
variable[b] assign[=] call[call[name[np].ascontiguousarray, parameter[name[a_cut]]].view, parameter[call[name[np].dtype, parameter[tuple[[<ast.Attribute object at 0x7da1b11dba30>, <ast.BinOp object at 0x7da1b11dbc70>]]]]]]
<ast.Tuple object at 0x7da1b11db880> assign[=] call[name[np].unique, parameter[name[b]]]
if <ast.UnaryOp object at 0x7da1b11d9c30> begin[:]
return[call[name[array]][call[name[np].sort, parameter[name[index]]]]] | keyword[def] identifier[unique_row] ( identifier[array] , identifier[use_columns] = keyword[None] , identifier[selected_columns_only] = keyword[False] ):
literal[string]
keyword[if] identifier[array] . identifier[dtype] . identifier[names] keyword[is] keyword[None] :
keyword[if] identifier[use_columns] keyword[is] keyword[not] keyword[None] :
identifier[a_cut] = identifier[array] [:, identifier[use_columns] ]
keyword[else] :
identifier[a_cut] = identifier[array]
keyword[if] identifier[len] ( identifier[use_columns] )> literal[int] :
identifier[b] = identifier[np] . identifier[ascontiguousarray] ( identifier[a_cut] ). identifier[view] ( identifier[np] . identifier[dtype] (( identifier[np] . identifier[void] , identifier[a_cut] . identifier[dtype] . identifier[itemsize] * identifier[a_cut] . identifier[shape] [ literal[int] ])))
keyword[else] :
identifier[b] = identifier[np] . identifier[ascontiguousarray] ( identifier[a_cut] )
identifier[_] , identifier[index] = identifier[np] . identifier[unique] ( identifier[b] , identifier[return_index] = keyword[True] )
keyword[if] keyword[not] identifier[selected_columns_only] :
keyword[return] identifier[array] [ identifier[np] . identifier[sort] ( identifier[index] )]
keyword[else] :
keyword[return] identifier[a_cut] [ identifier[np] . identifier[sort] ( identifier[index] )]
keyword[else] :
identifier[names] = identifier[list] ( identifier[array] . identifier[dtype] . identifier[names] )
keyword[if] identifier[use_columns] keyword[is] keyword[not] keyword[None] :
identifier[new_names] =[ identifier[names] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[use_columns] ]
keyword[else] :
identifier[new_names] = identifier[names]
identifier[a_cut] , identifier[index] = identifier[np] . identifier[unique] ( identifier[array] [ identifier[new_names] ], identifier[return_index] = keyword[True] )
keyword[if] keyword[not] identifier[selected_columns_only] :
keyword[return] identifier[array] [ identifier[np] . identifier[sort] ( identifier[index] )]
keyword[else] :
keyword[return] identifier[array] [ identifier[np] . identifier[sort] ( identifier[index] )][ identifier[new_names] ] | def unique_row(array, use_columns=None, selected_columns_only=False):
"""Takes a numpy array and returns the array reduced to unique rows. If columns are defined only these columns are taken to define a unique row.
The returned array can have all columns of the original array or only the columns defined in use_columns.
Parameters
----------
array : numpy.ndarray
use_columns : list
Index of columns to be used to define a unique row
selected_columns_only : bool
If true only the columns defined in use_columns are returned
Returns
-------
numpy.ndarray
"""
if array.dtype.names is None: # normal array has no named dtype
if use_columns is not None:
a_cut = array[:, use_columns] # depends on [control=['if'], data=['use_columns']]
else:
a_cut = array
if len(use_columns) > 1:
b = np.ascontiguousarray(a_cut).view(np.dtype((np.void, a_cut.dtype.itemsize * a_cut.shape[1]))) # depends on [control=['if'], data=[]]
else:
b = np.ascontiguousarray(a_cut)
(_, index) = np.unique(b, return_index=True)
if not selected_columns_only:
return array[np.sort(index)] # sort to preserve order # depends on [control=['if'], data=[]]
else:
return a_cut[np.sort(index)] # sort to preserve order # depends on [control=['if'], data=[]]
else: # names for dtype founnd --> array is recarray
names = list(array.dtype.names)
if use_columns is not None:
new_names = [names[i] for i in use_columns] # depends on [control=['if'], data=['use_columns']]
else:
new_names = names
(a_cut, index) = np.unique(array[new_names], return_index=True)
if not selected_columns_only:
return array[np.sort(index)] # sort to preserve order # depends on [control=['if'], data=[]]
else:
return array[np.sort(index)][new_names] |
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1) | def function[help, parameter[]]:
constant[Print this help]
call[name[safe_print], parameter[constant[Run "make [-p <PYTHON>] <target>" where <target> is one of:]]]
for taget[name[name]] in starred[call[name[sorted], parameter[name[_cmds]]]] begin[:]
call[name[safe_print], parameter[binary_operation[constant[ %-20s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da2041d9930>, <ast.BoolOp object at 0x7da2041daf50>]]]]]
call[name[sys].exit, parameter[constant[1]]] | keyword[def] identifier[help] ():
literal[string]
identifier[safe_print] ( literal[string] )
keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[_cmds] ):
identifier[safe_print] (
literal[string] %( identifier[name] . identifier[replace] ( literal[string] , literal[string] ), identifier[_cmds] [ identifier[name] ] keyword[or] literal[string] ))
identifier[sys] . identifier[exit] ( literal[int] ) | def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(' %-20s %s' % (name.replace('_', '-'), _cmds[name] or '')) # depends on [control=['for'], data=['name']]
sys.exit(1) |
def delta(self):
"""
Variance ratio between ``K`` and ``I``.
"""
v = float(self._logistic.value)
if v > 0.0:
v = 1 / (1 + exp(-v))
else:
v = exp(v)
v = v / (v + 1.0)
return min(max(v, epsilon.tiny), 1 - epsilon.tiny) | def function[delta, parameter[self]]:
constant[
Variance ratio between ``K`` and ``I``.
]
variable[v] assign[=] call[name[float], parameter[name[self]._logistic.value]]
if compare[name[v] greater[>] constant[0.0]] begin[:]
variable[v] assign[=] binary_operation[constant[1] / binary_operation[constant[1] + call[name[exp], parameter[<ast.UnaryOp object at 0x7da1affc2290>]]]]
return[call[name[min], parameter[call[name[max], parameter[name[v], name[epsilon].tiny]], binary_operation[constant[1] - name[epsilon].tiny]]]] | keyword[def] identifier[delta] ( identifier[self] ):
literal[string]
identifier[v] = identifier[float] ( identifier[self] . identifier[_logistic] . identifier[value] )
keyword[if] identifier[v] > literal[int] :
identifier[v] = literal[int] /( literal[int] + identifier[exp] (- identifier[v] ))
keyword[else] :
identifier[v] = identifier[exp] ( identifier[v] )
identifier[v] = identifier[v] /( identifier[v] + literal[int] )
keyword[return] identifier[min] ( identifier[max] ( identifier[v] , identifier[epsilon] . identifier[tiny] ), literal[int] - identifier[epsilon] . identifier[tiny] ) | def delta(self):
"""
Variance ratio between ``K`` and ``I``.
"""
v = float(self._logistic.value)
if v > 0.0:
v = 1 / (1 + exp(-v)) # depends on [control=['if'], data=['v']]
else:
v = exp(v)
v = v / (v + 1.0)
return min(max(v, epsilon.tiny), 1 - epsilon.tiny) |
def setClockFormat(clockFormat, **kwargs):
'''
Set the clock format, either 12h or 24h format.
CLI Example:
.. code-block:: bash
salt '*' gnome.setClockFormat <12h|24h> user=<username>
'''
if clockFormat != '12h' and clockFormat != '24h':
return False
_gsession = _GSettings(user=kwargs.get('user'),
schema='org.gnome.desktop.interface',
key='clock-format')
return _gsession._set(clockFormat) | def function[setClockFormat, parameter[clockFormat]]:
constant[
Set the clock format, either 12h or 24h format.
CLI Example:
.. code-block:: bash
salt '*' gnome.setClockFormat <12h|24h> user=<username>
]
if <ast.BoolOp object at 0x7da1b21a0b20> begin[:]
return[constant[False]]
variable[_gsession] assign[=] call[name[_GSettings], parameter[]]
return[call[name[_gsession]._set, parameter[name[clockFormat]]]] | keyword[def] identifier[setClockFormat] ( identifier[clockFormat] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[clockFormat] != literal[string] keyword[and] identifier[clockFormat] != literal[string] :
keyword[return] keyword[False]
identifier[_gsession] = identifier[_GSettings] ( identifier[user] = identifier[kwargs] . identifier[get] ( literal[string] ),
identifier[schema] = literal[string] ,
identifier[key] = literal[string] )
keyword[return] identifier[_gsession] . identifier[_set] ( identifier[clockFormat] ) | def setClockFormat(clockFormat, **kwargs):
"""
Set the clock format, either 12h or 24h format.
CLI Example:
.. code-block:: bash
salt '*' gnome.setClockFormat <12h|24h> user=<username>
"""
if clockFormat != '12h' and clockFormat != '24h':
return False # depends on [control=['if'], data=[]]
_gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format')
return _gsession._set(clockFormat) |
def save_model(self, directory=None, append_timestep=True):
"""
Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory (str): Optional checkpoint directory.
append_timestep (bool): Appends the current timestep to the checkpoint file if true.
If this is set to True, the load path must include the checkpoint timestep suffix.
For example, if stored to models/ and set to true, the exported file will be of the
form models/model.ckpt-X where X is the last timestep saved. The load path must
precisely match this file name. If this option is turned off, the checkpoint will
always overwrite the file specified in path and the model can always be loaded under
this path.
Returns:
Checkpoint path were the model was saved.
"""
return self.model.save(directory=directory, append_timestep=append_timestep) | def function[save_model, parameter[self, directory, append_timestep]]:
constant[
Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory (str): Optional checkpoint directory.
append_timestep (bool): Appends the current timestep to the checkpoint file if true.
If this is set to True, the load path must include the checkpoint timestep suffix.
For example, if stored to models/ and set to true, the exported file will be of the
form models/model.ckpt-X where X is the last timestep saved. The load path must
precisely match this file name. If this option is turned off, the checkpoint will
always overwrite the file specified in path and the model can always be loaded under
this path.
Returns:
Checkpoint path were the model was saved.
]
return[call[name[self].model.save, parameter[]]] | keyword[def] identifier[save_model] ( identifier[self] , identifier[directory] = keyword[None] , identifier[append_timestep] = keyword[True] ):
literal[string]
keyword[return] identifier[self] . identifier[model] . identifier[save] ( identifier[directory] = identifier[directory] , identifier[append_timestep] = identifier[append_timestep] ) | def save_model(self, directory=None, append_timestep=True):
"""
Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory (str): Optional checkpoint directory.
append_timestep (bool): Appends the current timestep to the checkpoint file if true.
If this is set to True, the load path must include the checkpoint timestep suffix.
For example, if stored to models/ and set to true, the exported file will be of the
form models/model.ckpt-X where X is the last timestep saved. The load path must
precisely match this file name. If this option is turned off, the checkpoint will
always overwrite the file specified in path and the model can always be loaded under
this path.
Returns:
Checkpoint path were the model was saved.
"""
return self.model.save(directory=directory, append_timestep=append_timestep) |
def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewitems", None)
if not func:
func = obj.items
return func(**kwargs) | def function[viewitems, parameter[obj]]:
constant[
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method.]
variable[func] assign[=] call[name[getattr], parameter[name[obj], constant[viewitems], constant[None]]]
if <ast.UnaryOp object at 0x7da20c9935b0> begin[:]
variable[func] assign[=] name[obj].items
return[call[name[func], parameter[]]] | keyword[def] identifier[viewitems] ( identifier[obj] ,** identifier[kwargs] ):
literal[string]
identifier[func] = identifier[getattr] ( identifier[obj] , literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[func] :
identifier[func] = identifier[obj] . identifier[items]
keyword[return] identifier[func] (** identifier[kwargs] ) | def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, 'viewitems', None)
if not func:
func = obj.items # depends on [control=['if'], data=[]]
return func(**kwargs) |
def disable_hardware_breakpoint(self, dwThreadId, address):
"""
Disables the hardware breakpoint at the given address.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_hardware_breakpoint},
L{enable_hardware_breakpoint}
L{enable_one_shot_hardware_breakpoint},
L{erase_hardware_breakpoint},
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
t = self.system.get_thread(dwThreadId)
p = t.get_process()
bp = self.get_hardware_breakpoint(dwThreadId, address)
if bp.is_running():
self.__del_running_bp(dwThreadId, bp)
bp.disable(p, t) | def function[disable_hardware_breakpoint, parameter[self, dwThreadId, address]]:
constant[
Disables the hardware breakpoint at the given address.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_hardware_breakpoint},
L{enable_hardware_breakpoint}
L{enable_one_shot_hardware_breakpoint},
L{erase_hardware_breakpoint},
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
]
variable[t] assign[=] call[name[self].system.get_thread, parameter[name[dwThreadId]]]
variable[p] assign[=] call[name[t].get_process, parameter[]]
variable[bp] assign[=] call[name[self].get_hardware_breakpoint, parameter[name[dwThreadId], name[address]]]
if call[name[bp].is_running, parameter[]] begin[:]
call[name[self].__del_running_bp, parameter[name[dwThreadId], name[bp]]]
call[name[bp].disable, parameter[name[p], name[t]]] | keyword[def] identifier[disable_hardware_breakpoint] ( identifier[self] , identifier[dwThreadId] , identifier[address] ):
literal[string]
identifier[t] = identifier[self] . identifier[system] . identifier[get_thread] ( identifier[dwThreadId] )
identifier[p] = identifier[t] . identifier[get_process] ()
identifier[bp] = identifier[self] . identifier[get_hardware_breakpoint] ( identifier[dwThreadId] , identifier[address] )
keyword[if] identifier[bp] . identifier[is_running] ():
identifier[self] . identifier[__del_running_bp] ( identifier[dwThreadId] , identifier[bp] )
identifier[bp] . identifier[disable] ( identifier[p] , identifier[t] ) | def disable_hardware_breakpoint(self, dwThreadId, address):
"""
Disables the hardware breakpoint at the given address.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_hardware_breakpoint},
L{enable_hardware_breakpoint}
L{enable_one_shot_hardware_breakpoint},
L{erase_hardware_breakpoint},
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
t = self.system.get_thread(dwThreadId)
p = t.get_process()
bp = self.get_hardware_breakpoint(dwThreadId, address)
if bp.is_running():
self.__del_running_bp(dwThreadId, bp) # depends on [control=['if'], data=[]]
bp.disable(p, t) |
def run(task=None, loop=None):
'''Run the event loop forever or until the task/future *task* is finished.
:param task:
Optional. Task or Future which is run until complete. If parameter is
``None`` runs the event loop forever.
:param loop:
Optional. Event loop to use. If the parameter is ``None`` uses
asyncio's base event loop.
.. note::
This method has the same intent as :func:`aiomas.util.run`.
'''
if loop is None:
loop = asyncio.get_event_loop()
if task is None:
return loop.run_forever()
else:
return loop.run_until_complete(task) | def function[run, parameter[task, loop]]:
constant[Run the event loop forever or until the task/future *task* is finished.
:param task:
Optional. Task or Future which is run until complete. If parameter is
``None`` runs the event loop forever.
:param loop:
Optional. Event loop to use. If the parameter is ``None`` uses
asyncio's base event loop.
.. note::
This method has the same intent as :func:`aiomas.util.run`.
]
if compare[name[loop] is constant[None]] begin[:]
variable[loop] assign[=] call[name[asyncio].get_event_loop, parameter[]]
if compare[name[task] is constant[None]] begin[:]
return[call[name[loop].run_forever, parameter[]]] | keyword[def] identifier[run] ( identifier[task] = keyword[None] , identifier[loop] = keyword[None] ):
literal[string]
keyword[if] identifier[loop] keyword[is] keyword[None] :
identifier[loop] = identifier[asyncio] . identifier[get_event_loop] ()
keyword[if] identifier[task] keyword[is] keyword[None] :
keyword[return] identifier[loop] . identifier[run_forever] ()
keyword[else] :
keyword[return] identifier[loop] . identifier[run_until_complete] ( identifier[task] ) | def run(task=None, loop=None):
"""Run the event loop forever or until the task/future *task* is finished.
:param task:
Optional. Task or Future which is run until complete. If parameter is
``None`` runs the event loop forever.
:param loop:
Optional. Event loop to use. If the parameter is ``None`` uses
asyncio's base event loop.
.. note::
This method has the same intent as :func:`aiomas.util.run`.
"""
if loop is None:
loop = asyncio.get_event_loop() # depends on [control=['if'], data=['loop']]
if task is None:
return loop.run_forever() # depends on [control=['if'], data=[]]
else:
return loop.run_until_complete(task) |
def keys_to_string(data):
"""
Function to convert all the unicode keys in string keys
"""
if isinstance(data, dict):
for key in list(data.keys()):
if isinstance(key, six.string_types):
value = data[key]
val = keys_to_string(value)
del data[key]
data[key.encode("utf8", "ignore")] = val
return data | def function[keys_to_string, parameter[data]]:
constant[
Function to convert all the unicode keys in string keys
]
if call[name[isinstance], parameter[name[data], name[dict]]] begin[:]
for taget[name[key]] in starred[call[name[list], parameter[call[name[data].keys, parameter[]]]]] begin[:]
if call[name[isinstance], parameter[name[key], name[six].string_types]] begin[:]
variable[value] assign[=] call[name[data]][name[key]]
variable[val] assign[=] call[name[keys_to_string], parameter[name[value]]]
<ast.Delete object at 0x7da18eb55870>
call[name[data]][call[name[key].encode, parameter[constant[utf8], constant[ignore]]]] assign[=] name[val]
return[name[data]] | keyword[def] identifier[keys_to_string] ( identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[data] . identifier[keys] ()):
keyword[if] identifier[isinstance] ( identifier[key] , identifier[six] . identifier[string_types] ):
identifier[value] = identifier[data] [ identifier[key] ]
identifier[val] = identifier[keys_to_string] ( identifier[value] )
keyword[del] identifier[data] [ identifier[key] ]
identifier[data] [ identifier[key] . identifier[encode] ( literal[string] , literal[string] )]= identifier[val]
keyword[return] identifier[data] | def keys_to_string(data):
"""
Function to convert all the unicode keys in string keys
"""
if isinstance(data, dict):
for key in list(data.keys()):
if isinstance(key, six.string_types):
value = data[key]
val = keys_to_string(value)
del data[key]
data[key.encode('utf8', 'ignore')] = val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
return data |
def in_units(self, units, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data converted to the
supplied units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which equivalencies
are supported for this object, try the ``list_equivalencies``
method. Default: None
kwargs: optional
Any additional keyword arguments are supplied to the
equivalence
Raises
------
If the provided unit does not have the same dimensions as the array
this will raise a UnitConversionError
Examples
--------
>>> from unyt import c, gram
>>> m = 10*gram
>>> E = m*c**2
>>> print(E.in_units('erg'))
8.987551787368176e+21 erg
>>> print(E.in_units('J'))
898755178736817.6 J
"""
units = _sanitize_units_convert(units, self.units.registry)
if equivalence is None:
conv_data = _check_em_conversion(
self.units, units, registry=self.units.registry
)
if any(conv_data):
new_units, (conversion_factor, offset) = _em_conversion(
self.units, conv_data, units
)
offset = 0
else:
new_units = units
(conversion_factor, offset) = self.units.get_conversion_factor(
new_units, self.dtype
)
dsize = self.dtype.itemsize
if self.dtype.kind in ("u", "i"):
large = LARGE_INPUT.get(dsize, 0)
if large and np.any(np.abs(self.d) > large):
warnings.warn(
"Overflow encountered while converting to units '%s'"
% new_units,
RuntimeWarning,
stacklevel=2,
)
new_dtype = np.dtype("f" + str(dsize))
conversion_factor = new_dtype.type(conversion_factor)
ret = np.asarray(self.ndview * conversion_factor, dtype=new_dtype)
if offset:
np.subtract(ret, offset, ret)
new_array = type(self)(ret, new_units, bypass_validation=True)
return new_array
else:
return self.to_equivalent(units, equivalence, **kwargs) | def function[in_units, parameter[self, units, equivalence]]:
constant[
Creates a copy of this array with the data converted to the
supplied units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which equivalencies
are supported for this object, try the ``list_equivalencies``
method. Default: None
kwargs: optional
Any additional keyword arguments are supplied to the
equivalence
Raises
------
If the provided unit does not have the same dimensions as the array
this will raise a UnitConversionError
Examples
--------
>>> from unyt import c, gram
>>> m = 10*gram
>>> E = m*c**2
>>> print(E.in_units('erg'))
8.987551787368176e+21 erg
>>> print(E.in_units('J'))
898755178736817.6 J
]
variable[units] assign[=] call[name[_sanitize_units_convert], parameter[name[units], name[self].units.registry]]
if compare[name[equivalence] is constant[None]] begin[:]
variable[conv_data] assign[=] call[name[_check_em_conversion], parameter[name[self].units, name[units]]]
if call[name[any], parameter[name[conv_data]]] begin[:]
<ast.Tuple object at 0x7da1b11bbac0> assign[=] call[name[_em_conversion], parameter[name[self].units, name[conv_data], name[units]]]
variable[offset] assign[=] constant[0]
variable[dsize] assign[=] name[self].dtype.itemsize
if compare[name[self].dtype.kind in tuple[[<ast.Constant object at 0x7da1b11ba2c0>, <ast.Constant object at 0x7da1b11ba800>]]] begin[:]
variable[large] assign[=] call[name[LARGE_INPUT].get, parameter[name[dsize], constant[0]]]
if <ast.BoolOp object at 0x7da1b11ba080> begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[Overflow encountered while converting to units '%s'] <ast.Mod object at 0x7da2590d6920> name[new_units]], name[RuntimeWarning]]]
variable[new_dtype] assign[=] call[name[np].dtype, parameter[binary_operation[constant[f] + call[name[str], parameter[name[dsize]]]]]]
variable[conversion_factor] assign[=] call[name[new_dtype].type, parameter[name[conversion_factor]]]
variable[ret] assign[=] call[name[np].asarray, parameter[binary_operation[name[self].ndview * name[conversion_factor]]]]
if name[offset] begin[:]
call[name[np].subtract, parameter[name[ret], name[offset], name[ret]]]
variable[new_array] assign[=] call[call[name[type], parameter[name[self]]], parameter[name[ret], name[new_units]]]
return[name[new_array]] | keyword[def] identifier[in_units] ( identifier[self] , identifier[units] , identifier[equivalence] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[units] = identifier[_sanitize_units_convert] ( identifier[units] , identifier[self] . identifier[units] . identifier[registry] )
keyword[if] identifier[equivalence] keyword[is] keyword[None] :
identifier[conv_data] = identifier[_check_em_conversion] (
identifier[self] . identifier[units] , identifier[units] , identifier[registry] = identifier[self] . identifier[units] . identifier[registry]
)
keyword[if] identifier[any] ( identifier[conv_data] ):
identifier[new_units] ,( identifier[conversion_factor] , identifier[offset] )= identifier[_em_conversion] (
identifier[self] . identifier[units] , identifier[conv_data] , identifier[units]
)
identifier[offset] = literal[int]
keyword[else] :
identifier[new_units] = identifier[units]
( identifier[conversion_factor] , identifier[offset] )= identifier[self] . identifier[units] . identifier[get_conversion_factor] (
identifier[new_units] , identifier[self] . identifier[dtype]
)
identifier[dsize] = identifier[self] . identifier[dtype] . identifier[itemsize]
keyword[if] identifier[self] . identifier[dtype] . identifier[kind] keyword[in] ( literal[string] , literal[string] ):
identifier[large] = identifier[LARGE_INPUT] . identifier[get] ( identifier[dsize] , literal[int] )
keyword[if] identifier[large] keyword[and] identifier[np] . identifier[any] ( identifier[np] . identifier[abs] ( identifier[self] . identifier[d] )> identifier[large] ):
identifier[warnings] . identifier[warn] (
literal[string]
% identifier[new_units] ,
identifier[RuntimeWarning] ,
identifier[stacklevel] = literal[int] ,
)
identifier[new_dtype] = identifier[np] . identifier[dtype] ( literal[string] + identifier[str] ( identifier[dsize] ))
identifier[conversion_factor] = identifier[new_dtype] . identifier[type] ( identifier[conversion_factor] )
identifier[ret] = identifier[np] . identifier[asarray] ( identifier[self] . identifier[ndview] * identifier[conversion_factor] , identifier[dtype] = identifier[new_dtype] )
keyword[if] identifier[offset] :
identifier[np] . identifier[subtract] ( identifier[ret] , identifier[offset] , identifier[ret] )
identifier[new_array] = identifier[type] ( identifier[self] )( identifier[ret] , identifier[new_units] , identifier[bypass_validation] = keyword[True] )
keyword[return] identifier[new_array]
keyword[else] :
keyword[return] identifier[self] . identifier[to_equivalent] ( identifier[units] , identifier[equivalence] ,** identifier[kwargs] ) | def in_units(self, units, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data converted to the
supplied units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which equivalencies
are supported for this object, try the ``list_equivalencies``
method. Default: None
kwargs: optional
Any additional keyword arguments are supplied to the
equivalence
Raises
------
If the provided unit does not have the same dimensions as the array
this will raise a UnitConversionError
Examples
--------
>>> from unyt import c, gram
>>> m = 10*gram
>>> E = m*c**2
>>> print(E.in_units('erg'))
8.987551787368176e+21 erg
>>> print(E.in_units('J'))
898755178736817.6 J
"""
units = _sanitize_units_convert(units, self.units.registry)
if equivalence is None:
conv_data = _check_em_conversion(self.units, units, registry=self.units.registry)
if any(conv_data):
(new_units, (conversion_factor, offset)) = _em_conversion(self.units, conv_data, units)
offset = 0 # depends on [control=['if'], data=[]]
else:
new_units = units
(conversion_factor, offset) = self.units.get_conversion_factor(new_units, self.dtype)
dsize = self.dtype.itemsize
if self.dtype.kind in ('u', 'i'):
large = LARGE_INPUT.get(dsize, 0)
if large and np.any(np.abs(self.d) > large):
warnings.warn("Overflow encountered while converting to units '%s'" % new_units, RuntimeWarning, stacklevel=2) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
new_dtype = np.dtype('f' + str(dsize))
conversion_factor = new_dtype.type(conversion_factor)
ret = np.asarray(self.ndview * conversion_factor, dtype=new_dtype)
if offset:
np.subtract(ret, offset, ret) # depends on [control=['if'], data=[]]
new_array = type(self)(ret, new_units, bypass_validation=True)
return new_array # depends on [control=['if'], data=[]]
else:
return self.to_equivalent(units, equivalence, **kwargs) |
def GetCellValue (self, column, default = None):
""" get a cell, return default if that cell does not exist
note that column and row START AT 1 same as excel
"""
if isinstance(column, str):
column = ColumnToIndex(column)
if column in self:
if "content" in UserDict.__getitem__(self, column):
return UserDict.__getitem__(self, column)["content"]
return default | def function[GetCellValue, parameter[self, column, default]]:
constant[ get a cell, return default if that cell does not exist
note that column and row START AT 1 same as excel
]
if call[name[isinstance], parameter[name[column], name[str]]] begin[:]
variable[column] assign[=] call[name[ColumnToIndex], parameter[name[column]]]
if compare[name[column] in name[self]] begin[:]
if compare[constant[content] in call[name[UserDict].__getitem__, parameter[name[self], name[column]]]] begin[:]
return[call[call[name[UserDict].__getitem__, parameter[name[self], name[column]]]][constant[content]]]
return[name[default]] | keyword[def] identifier[GetCellValue] ( identifier[self] , identifier[column] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[column] , identifier[str] ):
identifier[column] = identifier[ColumnToIndex] ( identifier[column] )
keyword[if] identifier[column] keyword[in] identifier[self] :
keyword[if] literal[string] keyword[in] identifier[UserDict] . identifier[__getitem__] ( identifier[self] , identifier[column] ):
keyword[return] identifier[UserDict] . identifier[__getitem__] ( identifier[self] , identifier[column] )[ literal[string] ]
keyword[return] identifier[default] | def GetCellValue(self, column, default=None):
""" get a cell, return default if that cell does not exist
note that column and row START AT 1 same as excel
"""
if isinstance(column, str):
column = ColumnToIndex(column) # depends on [control=['if'], data=[]]
if column in self:
if 'content' in UserDict.__getitem__(self, column):
return UserDict.__getitem__(self, column)['content'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['column', 'self']]
return default |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.