code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def connect_success(self, connection_id):
    """
    Check to see if the successful connection is meant to be peered with.
    If not, it should be used to get the peers from the endpoint.
    """
    endpoint = self._network.connection_id_to_endpoint(connection_id)
    info = self._temp_endpoints.get(endpoint)
    LOGGER.debug(
        "Endpoint has completed authorization: %s (id: %s)",
        endpoint,
        connection_id)
    if info is None:
        LOGGER.debug("Received unknown endpoint: %s", endpoint)
    elif info.status == EndpointStatus.PEERING:
        self._connect_success_peering(connection_id, endpoint)
    elif info.status == EndpointStatus.TOPOLOGY:
        self._connect_success_topology(connection_id)
    else:
        LOGGER.debug("Endpoint has unknown status: %s", endpoint)
    # Whatever the outcome, the temporary bookkeeping entry is done with.
    with self._lock:
        self._temp_endpoints.pop(endpoint, None)
|
def function[connect_success, parameter[self, connection_id]]:
constant[
Check to see if the successful connection is meant to be peered with.
If not, it should be used to get the peers from the endpoint.
]
variable[endpoint] assign[=] call[name[self]._network.connection_id_to_endpoint, parameter[name[connection_id]]]
variable[endpoint_info] assign[=] call[name[self]._temp_endpoints.get, parameter[name[endpoint]]]
call[name[LOGGER].debug, parameter[constant[Endpoint has completed authorization: %s (id: %s)], name[endpoint], name[connection_id]]]
if compare[name[endpoint_info] is constant[None]] begin[:]
call[name[LOGGER].debug, parameter[constant[Received unknown endpoint: %s], name[endpoint]]]
with name[self]._lock begin[:]
if compare[name[endpoint] in name[self]._temp_endpoints] begin[:]
<ast.Delete object at 0x7da204564f70>
|
keyword[def] identifier[connect_success] ( identifier[self] , identifier[connection_id] ):
literal[string]
identifier[endpoint] = identifier[self] . identifier[_network] . identifier[connection_id_to_endpoint] ( identifier[connection_id] )
identifier[endpoint_info] = identifier[self] . identifier[_temp_endpoints] . identifier[get] ( identifier[endpoint] )
identifier[LOGGER] . identifier[debug] ( literal[string] ,
identifier[endpoint] ,
identifier[connection_id] )
keyword[if] identifier[endpoint_info] keyword[is] keyword[None] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[endpoint] )
keyword[elif] identifier[endpoint_info] . identifier[status] == identifier[EndpointStatus] . identifier[PEERING] :
identifier[self] . identifier[_connect_success_peering] ( identifier[connection_id] , identifier[endpoint] )
keyword[elif] identifier[endpoint_info] . identifier[status] == identifier[EndpointStatus] . identifier[TOPOLOGY] :
identifier[self] . identifier[_connect_success_topology] ( identifier[connection_id] )
keyword[else] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[endpoint] )
keyword[with] identifier[self] . identifier[_lock] :
keyword[if] identifier[endpoint] keyword[in] identifier[self] . identifier[_temp_endpoints] :
keyword[del] identifier[self] . identifier[_temp_endpoints] [ identifier[endpoint] ]
|
def connect_success(self, connection_id):
"""
Check to see if the successful connection is meant to be peered with.
If not, it should be used to get the peers from the endpoint.
"""
endpoint = self._network.connection_id_to_endpoint(connection_id)
endpoint_info = self._temp_endpoints.get(endpoint)
LOGGER.debug('Endpoint has completed authorization: %s (id: %s)', endpoint, connection_id)
if endpoint_info is None:
LOGGER.debug('Received unknown endpoint: %s', endpoint) # depends on [control=['if'], data=[]]
elif endpoint_info.status == EndpointStatus.PEERING:
self._connect_success_peering(connection_id, endpoint) # depends on [control=['if'], data=[]]
elif endpoint_info.status == EndpointStatus.TOPOLOGY:
self._connect_success_topology(connection_id) # depends on [control=['if'], data=[]]
else:
LOGGER.debug('Endpoint has unknown status: %s', endpoint)
with self._lock:
if endpoint in self._temp_endpoints:
del self._temp_endpoints[endpoint] # depends on [control=['if'], data=['endpoint']] # depends on [control=['with'], data=[]]
|
def to_pixel(self, wcs, mode='all'):
    """
    Return this aperture as a `CircularAnnulus` defined in pixel
    coordinates.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The world coordinate system (WCS) transformation to use.
    mode : {'all', 'wcs'}, optional
        ``'all'`` (default) applies the full transformation including
        distortions; ``'wcs'`` applies only the core WCS
        transformation.

    Returns
    -------
    aperture : `CircularAnnulus`
        The equivalent pixel-space aperture.
    """
    return CircularAnnulus(**self._to_pixel_params(wcs, mode=mode))
|
def function[to_pixel, parameter[self, wcs, mode]]:
constant[
Convert the aperture to a `CircularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `CircularAnnulus` object
A `CircularAnnulus` object.
]
variable[pixel_params] assign[=] call[name[self]._to_pixel_params, parameter[name[wcs]]]
return[call[name[CircularAnnulus], parameter[]]]
|
keyword[def] identifier[to_pixel] ( identifier[self] , identifier[wcs] , identifier[mode] = literal[string] ):
literal[string]
identifier[pixel_params] = identifier[self] . identifier[_to_pixel_params] ( identifier[wcs] , identifier[mode] = identifier[mode] )
keyword[return] identifier[CircularAnnulus] (** identifier[pixel_params] )
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `CircularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `CircularAnnulus` object
A `CircularAnnulus` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return CircularAnnulus(**pixel_params)
|
def create_content_addressed_github_uri(uri: URI) -> URI:
    """
    Returns a content-addressed Github "git_url" that conforms to this scheme.
    https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha

    Accepts Github-defined "url" that conforms to this scheme
    https://api.github.com/repos/:owner/:repo/contents/:path/:to/manifest.json

    Raises CannotHandleURI if ``uri`` is not a valid Github API content
    URL, or if it points at something other than a ``'file'`` entry.
    Propagates ``requests.HTTPError`` on HTTP-level failures.
    """
    if not is_valid_api_github_uri(uri):
        raise CannotHandleURI(f"{uri} does not conform to Github's API 'url' scheme.")
    response = requests.get(uri)
    # Surface HTTP-level failures (404, rate limiting, ...) immediately.
    response.raise_for_status()
    # response.json() is the canonical requests idiom; it decodes the body
    # itself instead of round-tripping through json.loads(response.content).
    contents = response.json()
    if contents["type"] != "file":
        raise CannotHandleURI(
            f"Expected url to point to a 'file' type, instead received {contents['type']}."
        )
    return contents["git_url"]
|
def function[create_content_addressed_github_uri, parameter[uri]]:
constant[
Returns a content-addressed Github "git_url" that conforms to this scheme.
https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha
Accepts Github-defined "url" that conforms to this scheme
https://api.github.com/repos/:owner/:repo/contents/:path/:to/manifest.json
]
if <ast.UnaryOp object at 0x7da20e963d30> begin[:]
<ast.Raise object at 0x7da20e9639d0>
variable[response] assign[=] call[name[requests].get, parameter[name[uri]]]
call[name[response].raise_for_status, parameter[]]
variable[contents] assign[=] call[name[json].loads, parameter[name[response].content]]
if compare[call[name[contents]][constant[type]] not_equal[!=] constant[file]] begin[:]
<ast.Raise object at 0x7da204962da0>
return[call[name[contents]][constant[git_url]]]
|
keyword[def] identifier[create_content_addressed_github_uri] ( identifier[uri] : identifier[URI] )-> identifier[URI] :
literal[string]
keyword[if] keyword[not] identifier[is_valid_api_github_uri] ( identifier[uri] ):
keyword[raise] identifier[CannotHandleURI] ( literal[string] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[uri] )
identifier[response] . identifier[raise_for_status] ()
identifier[contents] = identifier[json] . identifier[loads] ( identifier[response] . identifier[content] )
keyword[if] identifier[contents] [ literal[string] ]!= literal[string] :
keyword[raise] identifier[CannotHandleURI] (
literal[string]
)
keyword[return] identifier[contents] [ literal[string] ]
|
def create_content_addressed_github_uri(uri: URI) -> URI:
"""
Returns a content-addressed Github "git_url" that conforms to this scheme.
https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha
Accepts Github-defined "url" that conforms to this scheme
https://api.github.com/repos/:owner/:repo/contents/:path/:to/manifest.json
"""
if not is_valid_api_github_uri(uri):
raise CannotHandleURI(f"{uri} does not conform to Github's API 'url' scheme.") # depends on [control=['if'], data=[]]
response = requests.get(uri)
response.raise_for_status()
contents = json.loads(response.content)
if contents['type'] != 'file':
raise CannotHandleURI(f"Expected url to point to a 'file' type, instead received {contents['type']}.") # depends on [control=['if'], data=[]]
return contents['git_url']
|
def play(self):
    """
    Start playback without blocking; control returns to the calling
    code immediately.
    """
    # Guard clause: already playing means nothing to do.
    if self.is_playing():
        return
    self.play_pause()
    self._is_playing = True
    self.playEvent(self)
|
def function[play, parameter[self]]:
constant[
Play the video asynchronously returning control immediately to the calling code
]
if <ast.UnaryOp object at 0x7da1b00e6650> begin[:]
call[name[self].play_pause, parameter[]]
name[self]._is_playing assign[=] constant[True]
call[name[self].playEvent, parameter[name[self]]]
|
keyword[def] identifier[play] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_playing] ():
identifier[self] . identifier[play_pause] ()
identifier[self] . identifier[_is_playing] = keyword[True]
identifier[self] . identifier[playEvent] ( identifier[self] )
|
def play(self):
"""
Play the video asynchronously returning control immediately to the calling code
"""
if not self.is_playing():
self.play_pause()
self._is_playing = True
self.playEvent(self) # depends on [control=['if'], data=[]]
|
def is_request_sent(request, relation='ceph'):
    """Check to see if a functionally equivalent request has already been sent.

    Returns True only if the request has been marked 'sent' for every
    relation id; vacuously True when there are no relation states at all.

    @param request: A CephBrokerRq object
    @param relation: Name of the relation to inspect (default 'ceph')
    """
    states = get_request_states(request, relation=relation)
    # all() short-circuits on the first un-sent relation; iterating
    # .values() avoids the redundant states[rid] key lookup.
    return all(state['sent'] for state in states.values())
|
def function[is_request_sent, parameter[request, relation]]:
constant[Check to see if a functionally equivalent request has already been sent
Returns True if a similair request has been sent
@param request: A CephBrokerRq object
]
variable[states] assign[=] call[name[get_request_states], parameter[name[request]]]
for taget[name[rid]] in starred[call[name[states].keys, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18bc73970> begin[:]
return[constant[False]]
return[constant[True]]
|
keyword[def] identifier[is_request_sent] ( identifier[request] , identifier[relation] = literal[string] ):
literal[string]
identifier[states] = identifier[get_request_states] ( identifier[request] , identifier[relation] = identifier[relation] )
keyword[for] identifier[rid] keyword[in] identifier[states] . identifier[keys] ():
keyword[if] keyword[not] identifier[states] [ identifier[rid] ][ literal[string] ]:
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def is_request_sent(request, relation='ceph'):
"""Check to see if a functionally equivalent request has already been sent
Returns True if a similair request has been sent
@param request: A CephBrokerRq object
"""
states = get_request_states(request, relation=relation)
for rid in states.keys():
if not states[rid]['sent']:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rid']]
return True
|
def create_comment(self, post_id, body):
    """
    Create a new comment attached to the message identified by *post_id*.
    """
    root = ET.Element('request')
    comment = ET.SubElement(root, 'comment')
    post_id_node = ET.SubElement(comment, 'post-id')
    post_id_node.text = str(int(post_id))
    body_node = ET.SubElement(comment, 'body')
    body_node.text = str(body)
    return self._request('/msg/create_comment', root)
|
def function[create_comment, parameter[self, post_id, body]]:
constant[
Create a new comment, associating it with a specific message.
]
variable[path] assign[=] constant[/msg/create_comment]
variable[req] assign[=] call[name[ET].Element, parameter[constant[request]]]
variable[comment] assign[=] call[name[ET].SubElement, parameter[name[req], constant[comment]]]
call[name[ET].SubElement, parameter[name[comment], constant[post-id]]].text assign[=] call[name[str], parameter[call[name[int], parameter[name[post_id]]]]]
call[name[ET].SubElement, parameter[name[comment], constant[body]]].text assign[=] call[name[str], parameter[name[body]]]
return[call[name[self]._request, parameter[name[path], name[req]]]]
|
keyword[def] identifier[create_comment] ( identifier[self] , identifier[post_id] , identifier[body] ):
literal[string]
identifier[path] = literal[string]
identifier[req] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[comment] = identifier[ET] . identifier[SubElement] ( identifier[req] , literal[string] )
identifier[ET] . identifier[SubElement] ( identifier[comment] , literal[string] ). identifier[text] = identifier[str] ( identifier[int] ( identifier[post_id] ))
identifier[ET] . identifier[SubElement] ( identifier[comment] , literal[string] ). identifier[text] = identifier[str] ( identifier[body] )
keyword[return] identifier[self] . identifier[_request] ( identifier[path] , identifier[req] )
|
def create_comment(self, post_id, body):
"""
Create a new comment, associating it with a specific message.
"""
path = '/msg/create_comment'
req = ET.Element('request')
comment = ET.SubElement(req, 'comment')
ET.SubElement(comment, 'post-id').text = str(int(post_id))
ET.SubElement(comment, 'body').text = str(body)
return self._request(path, req)
|
def top_referrers(self, domain_only=True):
    """
    Rank the referrers that send us traffic, busiest first.
    """
    referrer = self._referrer_clause(domain_only)
    hit_count = fn.Count(PageView.id)
    query = self.get_query().select(referrer, hit_count)
    query = query.group_by(referrer)
    query = query.order_by(hit_count.desc())
    return query.tuples()
|
def function[top_referrers, parameter[self, domain_only]]:
constant[
What domains send us the most traffic?
]
variable[referrer] assign[=] call[name[self]._referrer_clause, parameter[name[domain_only]]]
return[call[call[call[call[call[name[self].get_query, parameter[]].select, parameter[name[referrer], call[name[fn].Count, parameter[name[PageView].id]]]].group_by, parameter[name[referrer]]].order_by, parameter[call[call[name[fn].Count, parameter[name[PageView].id]].desc, parameter[]]]].tuples, parameter[]]]
|
keyword[def] identifier[top_referrers] ( identifier[self] , identifier[domain_only] = keyword[True] ):
literal[string]
identifier[referrer] = identifier[self] . identifier[_referrer_clause] ( identifier[domain_only] )
keyword[return] ( identifier[self] . identifier[get_query] ()
. identifier[select] ( identifier[referrer] , identifier[fn] . identifier[Count] ( identifier[PageView] . identifier[id] ))
. identifier[group_by] ( identifier[referrer] )
. identifier[order_by] ( identifier[fn] . identifier[Count] ( identifier[PageView] . identifier[id] ). identifier[desc] ())
. identifier[tuples] ())
|
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return self.get_query().select(referrer, fn.Count(PageView.id)).group_by(referrer).order_by(fn.Count(PageView.id).desc()).tuples()
|
def summary(self):
    """Summarize the performance of a model on a test."""
    template = "== Model %s did not complete test %s due to error '%s'. =="
    fields = (str(self.model), str(self.test), str(self.score))
    return template % fields
|
def function[summary, parameter[self]]:
constant[Summarize the performance of a model on a test.]
return[binary_operation[constant[== Model %s did not complete test %s due to error '%s'. ==] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0d0f070>, <ast.Call object at 0x7da20e954820>, <ast.Call object at 0x7da1b0e9c790>]]]]
|
keyword[def] identifier[summary] ( identifier[self] ):
literal[string]
keyword[return] literal[string] %( identifier[str] ( identifier[self] . identifier[model] ), identifier[str] ( identifier[self] . identifier[test] ), identifier[str] ( identifier[self] . identifier[score] ))
|
def summary(self):
"""Summarize the performance of a model on a test."""
return "== Model %s did not complete test %s due to error '%s'. ==" % (str(self.model), str(self.test), str(self.score))
|
def magnitude(self):
    """Return the magnitude when treating the point as a vector.

    Uses math.hypot, which computes sqrt(x*x + y*y) without the
    intermediate overflow/underflow that the explicit form risks for
    very large or very small components.
    """
    return math.hypot(self.x, self.y)
|
def function[magnitude, parameter[self]]:
constant[Return the magnitude when treating the point as a vector.]
return[call[name[math].sqrt, parameter[binary_operation[binary_operation[name[self].x * name[self].x] + binary_operation[name[self].y * name[self].y]]]]]
|
keyword[def] identifier[magnitude] ( identifier[self] ):
literal[string]
keyword[return] identifier[math] . identifier[sqrt] ( identifier[self] . identifier[x] * identifier[self] . identifier[x] + identifier[self] . identifier[y] * identifier[self] . identifier[y] )
|
def magnitude(self):
"""Return the magnitude when treating the point as a vector."""
return math.sqrt(self.x * self.x + self.y * self.y)
|
def run_interactive(query, editor=None, just_count=False, default_no=False):
    """
    Ask the user about each patch suggested by the result of the query.

    @param query       An instance of the Query class.
    @param editor      Name of editor to use for manual intervention,
                       e.g. 'vim' or 'emacs'. If omitted/None, defaults
                       to the $EDITOR environment variable.
    @param just_count  If true: don't run normally. Just print out the
                       number of places in the codebase where the query
                       matches.
    @param default_no  If true, the per-patch prompt defaults to "no".
    """
    # NOTE(review): yes_to_all is only read here; presumably it is set by
    # _ask_about_patch when the user answers "yes to all" — confirm.
    global yes_to_all
    # Load start from bookmark, if appropriate.
    bookmark = _load_bookmark()
    if bookmark:
        print('Resume where you left off, at %s (y/n)? '
              % str(bookmark), end=' ')
        sys.stdout.flush()
        if (_prompt(default='y') == 'y'):
            query.start_position = bookmark
    # Okay, enough of this foolishness of computing start and end.
    # Let's ask the user about some one line diffs!
    print('Searching for first instance...')
    suggestions = query.generate_patches()
    if just_count:
        # Count-only mode: consume the generator, repainting the running
        # count on a single terminal line, then bail out early.
        # NOTE(review): enumerate is 0-based, so the final figure shown is
        # one less than the number of matches — confirm this is intended.
        for count, _ in enumerate(suggestions):
            terminal.terminal_move_to_beginning_of_line()
            print(count, end=" ")
            sys.stdout.flush()  # since print statement ends in comma
        print()
        return
    for patch in suggestions:
        # Persist progress before prompting so a mid-session quit can
        # resume from this patch next time.
        _save_bookmark(patch.start_position)
        _ask_about_patch(patch, editor, default_no)
        print('Searching...')
    # Completed a full pass: the saved resume point is no longer needed.
    _delete_bookmark()
    if yes_to_all:
        terminal.terminal_clear()
        print(
            "You MUST indicate in your code review:"
            " \"codemod with 'Yes to all'\"."
            "Make sure you and other people review the changes.\n\n"
            "With great power, comes great responsibility."
        )
|
def function[run_interactive, parameter[query, editor, just_count, default_no]]:
constant[
Asks the user about each patch suggested by the result of the query.
@param query An instance of the Query class.
@param editor Name of editor to use for manual intervention, e.g.
'vim'
or 'emacs'. If omitted/None, defaults to $EDITOR
environment variable.
@param just_count If true: don't run normally. Just print out number of
places in the codebase where the query matches.
]
<ast.Global object at 0x7da2041db460>
variable[bookmark] assign[=] call[name[_load_bookmark], parameter[]]
if name[bookmark] begin[:]
call[name[print], parameter[binary_operation[constant[Resume where you left off, at %s (y/n)? ] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[bookmark]]]]]]
call[name[sys].stdout.flush, parameter[]]
if compare[call[name[_prompt], parameter[]] equal[==] constant[y]] begin[:]
name[query].start_position assign[=] name[bookmark]
call[name[print], parameter[constant[Searching for first instance...]]]
variable[suggestions] assign[=] call[name[query].generate_patches, parameter[]]
if name[just_count] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1dd8820>, <ast.Name object at 0x7da1b1dd8850>]]] in starred[call[name[enumerate], parameter[name[suggestions]]]] begin[:]
call[name[terminal].terminal_move_to_beginning_of_line, parameter[]]
call[name[print], parameter[name[count]]]
call[name[sys].stdout.flush, parameter[]]
call[name[print], parameter[]]
return[None]
for taget[name[patch]] in starred[name[suggestions]] begin[:]
call[name[_save_bookmark], parameter[name[patch].start_position]]
call[name[_ask_about_patch], parameter[name[patch], name[editor], name[default_no]]]
call[name[print], parameter[constant[Searching...]]]
call[name[_delete_bookmark], parameter[]]
if name[yes_to_all] begin[:]
call[name[terminal].terminal_clear, parameter[]]
call[name[print], parameter[constant[You MUST indicate in your code review: "codemod with 'Yes to all'".Make sure you and other people review the changes.
With great power, comes great responsibility.]]]
|
keyword[def] identifier[run_interactive] ( identifier[query] , identifier[editor] = keyword[None] , identifier[just_count] = keyword[False] , identifier[default_no] = keyword[False] ):
literal[string]
keyword[global] identifier[yes_to_all]
identifier[bookmark] = identifier[_load_bookmark] ()
keyword[if] identifier[bookmark] :
identifier[print] ( literal[string]
% identifier[str] ( identifier[bookmark] ), identifier[end] = literal[string] )
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[if] ( identifier[_prompt] ( identifier[default] = literal[string] )== literal[string] ):
identifier[query] . identifier[start_position] = identifier[bookmark]
identifier[print] ( literal[string] )
identifier[suggestions] = identifier[query] . identifier[generate_patches] ()
keyword[if] identifier[just_count] :
keyword[for] identifier[count] , identifier[_] keyword[in] identifier[enumerate] ( identifier[suggestions] ):
identifier[terminal] . identifier[terminal_move_to_beginning_of_line] ()
identifier[print] ( identifier[count] , identifier[end] = literal[string] )
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[print] ()
keyword[return]
keyword[for] identifier[patch] keyword[in] identifier[suggestions] :
identifier[_save_bookmark] ( identifier[patch] . identifier[start_position] )
identifier[_ask_about_patch] ( identifier[patch] , identifier[editor] , identifier[default_no] )
identifier[print] ( literal[string] )
identifier[_delete_bookmark] ()
keyword[if] identifier[yes_to_all] :
identifier[terminal] . identifier[terminal_clear] ()
identifier[print] (
literal[string]
literal[string]
literal[string]
literal[string]
)
|
def run_interactive(query, editor=None, just_count=False, default_no=False):
"""
Asks the user about each patch suggested by the result of the query.
@param query An instance of the Query class.
@param editor Name of editor to use for manual intervention, e.g.
'vim'
or 'emacs'. If omitted/None, defaults to $EDITOR
environment variable.
@param just_count If true: don't run normally. Just print out number of
places in the codebase where the query matches.
"""
global yes_to_all
# Load start from bookmark, if appropriate.
bookmark = _load_bookmark()
if bookmark:
print('Resume where you left off, at %s (y/n)? ' % str(bookmark), end=' ')
sys.stdout.flush()
if _prompt(default='y') == 'y':
query.start_position = bookmark # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Okay, enough of this foolishness of computing start and end.
# Let's ask the user about some one line diffs!
print('Searching for first instance...')
suggestions = query.generate_patches()
if just_count:
for (count, _) in enumerate(suggestions):
terminal.terminal_move_to_beginning_of_line()
print(count, end=' ')
sys.stdout.flush() # since print statement ends in comma # depends on [control=['for'], data=[]]
print()
return # depends on [control=['if'], data=[]]
for patch in suggestions:
_save_bookmark(patch.start_position)
_ask_about_patch(patch, editor, default_no)
print('Searching...') # depends on [control=['for'], data=['patch']]
_delete_bookmark()
if yes_to_all:
terminal.terminal_clear()
print('You MUST indicate in your code review: "codemod with \'Yes to all\'".Make sure you and other people review the changes.\n\nWith great power, comes great responsibility.') # depends on [control=['if'], data=[]]
|
def agg_conc(original_countries,
             aggregates,
             missing_countries='test',
             merge_multiple_string='_&_',
             log_missing_countries=None,
             log_merge_multiple_strings=None,
             coco=None,
             as_dataframe='sparse',
             original_countries_class=None):
    """ Builds an aggregation concordance dict, vec or matrix

    Parameters
    ----------
    original_countries: list or str
        List of countries to be aggregated; also accepts any valid column
        name of CountryConverter.data
    aggregates: list of dict or str
        List of aggregation information. This can either be dict mapping the
        names of 'original_countries' to aggregates, or a valid column name of
        CountryConverter.data. Aggregation happens in the order given in this
        parameter; thus, countries assigned to an aggregate are not
        re-assigned by the following aggregation information.
    missing_countries: str, boolean, None
        Entry to fill in for countries in 'original_countries' which do not
        appear in 'aggregates'.
        str: Use the given name for all missing countries
        True: Use the name in original_countries for missing countries
        False: Skip these countries
        None: Use None for these countries
    merge_multiple_string: str or None, optional
        If multiple correspondence entries are given in one of the aggregates
        join them with the given string (default: '_&_'). To skip these
        entries, pass None.
    log_missing_countries: function, optional
        This function is called with the country name if the country is in
        'original_countries' but missing in all 'aggregates'.
        For example, pass
        lambda x: logging.error('Country {} missing'.format(x))
        to log errors for such countries. Default: do nothing
    log_merge_multiple_strings: function, optional
        Function to call for logging multiple strings, see
        log_missing_countries. Default: do nothing
    coco: instance of CountryConverter, optional
        CountryConverter instance used for the conversion. Pass a custom one
        if additional data is needed in addition to the custom country
        converter file. If None (default), the bare CountryConverter is used
    as_dataframe: boolean or str, optional
        If False, output as OrderedDict. If True or str, output as pandas
        dataframe. If str and 'full', output as a full matrix, otherwise only
        two columns with the original and aggregated names are returned.
    original_countries_class: str, optional
        Valid column name of CountryConverter.data. This parameter is needed
        if a list of countries is passed to 'original_countries' and strings
        corresponding to data in CountryConverter.data are used subsequently.
        Can be omitted otherwise.

    Returns
    -------
    OrderedDict or DataFrame (defined by 'as_dataframe')
    """
    if coco is None:
        coco = CountryConverter()
    # A string means "use this classification column" rather than an
    # explicit country list.
    if type(original_countries) is str:
        original_countries_class = original_countries
        original_countries = coco.data[original_countries].values
    else:
        # Infer the classification from the first country name if the
        # caller did not state it explicitly.
        original_countries_class = (original_countries_class or
                                    coco._get_input_format_from_name(
                                        original_countries[0]))
    if type(aggregates) is not list:
        aggregates = [aggregates]
    # OrderedDict keeps the output in the input country order; values start
    # as None, meaning "not yet assigned by any aggregate".
    correspond = OrderedDict.fromkeys(original_countries)
    for agg in aggregates:
        # A string aggregate names a CountryConverter column; resolve it to
        # a mapping dict first.
        if type(agg) is str:
            agg = coco.get_correspondance_dict(original_countries_class,
                                               agg)
        for country in original_countries:
            # Earlier aggregates win: only fill countries still unassigned.
            if correspond.get(country) is None:
                try:
                    entry = agg[country]
                except KeyError:
                    entry = None
                if type(entry) is list:
                    if 1 < len(entry):
                        # Multiple targets: join them or drop the entry,
                        # depending on merge_multiple_string.
                        if merge_multiple_string:
                            entry = merge_multiple_string.join([
                                str(e) for e in entry])
                        else:
                            entry = None
                            if log_merge_multiple_strings:
                                log_merge_multiple_strings(country)
                    else:
                        entry = entry[0]
                correspond[country] = entry
    # Second pass: handle countries no aggregate claimed, per
    # missing_countries policy.
    for country in original_countries:
        if correspond.get(country) is None:
            if missing_countries is True:
                correspond[country] = country
            elif missing_countries is False:
                del correspond[country]
            else:
                correspond[country] = missing_countries
            if log_missing_countries:
                log_missing_countries(country)
    if as_dataframe:
        correspond = pd.DataFrame.from_dict(
            correspond, orient='index').reset_index()
        correspond.columns = ['original', 'aggregated']
        # Any string starting with 'f'/'F' (e.g. 'full') requests the full
        # 0/1 concordance matrix instead of the two-column mapping.
        if ((type(as_dataframe) is str) and
                (as_dataframe[0].lower() == 'f')):
            # Remember the row order before pivoting, then restore it.
            _co_list = correspond.original
            correspond['val'] = 1
            correspond = correspond.set_index(
                ['original', 'aggregated']).unstack().fillna(0)['val']
            correspond = correspond.loc[_co_list]
    return correspond
|
def function[agg_conc, parameter[original_countries, aggregates, missing_countries, merge_multiple_string, log_missing_countries, log_merge_multiple_strings, coco, as_dataframe, original_countries_class]]:
constant[ Builds an aggregation concordance dict, vec or matrix
Parameters
----------
original_countries: list or str
List of countries to aggregated, also accepts and valid column name of
CountryConverter.data
aggregates: list of dict or str
List of aggregation information. This can either be dict mapping the
names of 'original_countries' to aggregates, or a valid column name of
CountryConverter.data Aggregation happens in order given in this
parameter. Thus, country assigned to an aggregate are not re-assigned
by the following aggregation information.
missing_countries: str, boolean, None
Entry to fill in for countries in 'original_countries' which do not
appear in 'aggregates'. str: Use the given name for all missing
countries True: Use the name in original_countries for missing
countries False: Skip these countries None: Use None for these
countries
merge_multiple_string: str or None, optional
If multiple correspondance entries are given in one of the aggregates
join them with the given string (default: '_&_'. To skip these enries,
pass None.
log_missing_countries: function, optional
This function is called with country is country is in
'original_countries' but missing in all 'aggregates'.
For example, pass
lambda x: logging.error('Country {} missing'.format(x))
to log errors for such countries. Default: do nothing
log_merge_multiple_strings: function, optional
Function to call for logging multiple strings, see
log_missing_countries Default: do nothing
coco: instance of CountryConverter, optional
CountryConverter instance used for the conversion. Pass a custom one
if additional data is needed in addition to the custom country
converter file. If None (default), the bare CountryConverter is used
as_dataframe: boolean or st, optional
If False, output as OrderedDict. If True or str, output as pandas
dataframe. If str and 'full', output as a full matrix, otherwise only
two collumns with the original and aggregated names are returned.
original_countries_class: str, optional
Valid column name of CountryConverter.data. This parameter is needed
if a list of countries is passed to 'orginal_countries' and strings
corresponding to data in CountryConverter.data are used subsequently.
Can be omitted otherwise.
Returns
-------
OrderedDict or DataFrame (defined by 'as_dataframe')
]
if compare[name[coco] is constant[None]] begin[:]
variable[coco] assign[=] call[name[CountryConverter], parameter[]]
if compare[call[name[type], parameter[name[original_countries]]] is name[str]] begin[:]
variable[original_countries_class] assign[=] name[original_countries]
variable[original_countries] assign[=] call[name[coco].data][name[original_countries]].values
if compare[call[name[type], parameter[name[aggregates]]] is_not name[list]] begin[:]
variable[aggregates] assign[=] list[[<ast.Name object at 0x7da2049626b0>]]
variable[correspond] assign[=] call[name[OrderedDict].fromkeys, parameter[name[original_countries]]]
for taget[name[agg]] in starred[name[aggregates]] begin[:]
if compare[call[name[type], parameter[name[agg]]] is name[str]] begin[:]
variable[agg] assign[=] call[name[coco].get_correspondance_dict, parameter[name[original_countries_class], name[agg]]]
for taget[name[country]] in starred[name[original_countries]] begin[:]
if compare[call[name[correspond].get, parameter[name[country]]] is constant[None]] begin[:]
<ast.Try object at 0x7da204962980>
if compare[call[name[type], parameter[name[entry]]] is name[list]] begin[:]
if compare[constant[1] less[<] call[name[len], parameter[name[entry]]]] begin[:]
if name[merge_multiple_string] begin[:]
variable[entry] assign[=] call[name[merge_multiple_string].join, parameter[<ast.ListComp object at 0x7da18ede6e90>]]
if name[log_merge_multiple_strings] begin[:]
call[name[log_merge_multiple_strings], parameter[name[country]]]
call[name[correspond]][name[country]] assign[=] name[entry]
for taget[name[country]] in starred[name[original_countries]] begin[:]
if compare[call[name[correspond].get, parameter[name[country]]] is constant[None]] begin[:]
if compare[name[missing_countries] is constant[True]] begin[:]
call[name[correspond]][name[country]] assign[=] name[country]
if name[log_missing_countries] begin[:]
call[name[log_missing_countries], parameter[name[country]]]
if name[as_dataframe] begin[:]
variable[correspond] assign[=] call[call[name[pd].DataFrame.from_dict, parameter[name[correspond]]].reset_index, parameter[]]
name[correspond].columns assign[=] list[[<ast.Constant object at 0x7da18ede4c10>, <ast.Constant object at 0x7da18ede52d0>]]
if <ast.BoolOp object at 0x7da18ede42e0> begin[:]
variable[_co_list] assign[=] name[correspond].original
call[name[correspond]][constant[val]] assign[=] constant[1]
variable[correspond] assign[=] call[call[call[call[name[correspond].set_index, parameter[list[[<ast.Constant object at 0x7da18ede5930>, <ast.Constant object at 0x7da18ede4400>]]]].unstack, parameter[]].fillna, parameter[constant[0]]]][constant[val]]
variable[correspond] assign[=] call[name[correspond].loc][name[_co_list]]
return[name[correspond]]
|
keyword[def] identifier[agg_conc] ( identifier[original_countries] ,
identifier[aggregates] ,
identifier[missing_countries] = literal[string] ,
identifier[merge_multiple_string] = literal[string] ,
identifier[log_missing_countries] = keyword[None] ,
identifier[log_merge_multiple_strings] = keyword[None] ,
identifier[coco] = keyword[None] ,
identifier[as_dataframe] = literal[string] ,
identifier[original_countries_class] = keyword[None] ):
literal[string]
keyword[if] identifier[coco] keyword[is] keyword[None] :
identifier[coco] = identifier[CountryConverter] ()
keyword[if] identifier[type] ( identifier[original_countries] ) keyword[is] identifier[str] :
identifier[original_countries_class] = identifier[original_countries]
identifier[original_countries] = identifier[coco] . identifier[data] [ identifier[original_countries] ]. identifier[values]
keyword[else] :
identifier[original_countries_class] =( identifier[original_countries_class] keyword[or]
identifier[coco] . identifier[_get_input_format_from_name] (
identifier[original_countries] [ literal[int] ]))
keyword[if] identifier[type] ( identifier[aggregates] ) keyword[is] keyword[not] identifier[list] :
identifier[aggregates] =[ identifier[aggregates] ]
identifier[correspond] = identifier[OrderedDict] . identifier[fromkeys] ( identifier[original_countries] )
keyword[for] identifier[agg] keyword[in] identifier[aggregates] :
keyword[if] identifier[type] ( identifier[agg] ) keyword[is] identifier[str] :
identifier[agg] = identifier[coco] . identifier[get_correspondance_dict] ( identifier[original_countries_class] ,
identifier[agg] )
keyword[for] identifier[country] keyword[in] identifier[original_countries] :
keyword[if] identifier[correspond] . identifier[get] ( identifier[country] ) keyword[is] keyword[None] :
keyword[try] :
identifier[entry] = identifier[agg] [ identifier[country] ]
keyword[except] identifier[KeyError] :
identifier[entry] = keyword[None]
keyword[if] identifier[type] ( identifier[entry] ) keyword[is] identifier[list] :
keyword[if] literal[int] < identifier[len] ( identifier[entry] ):
keyword[if] identifier[merge_multiple_string] :
identifier[entry] = identifier[merge_multiple_string] . identifier[join] ([
identifier[str] ( identifier[e] ) keyword[for] identifier[e] keyword[in] identifier[entry] ])
keyword[else] :
identifier[entry] = keyword[None]
keyword[if] identifier[log_merge_multiple_strings] :
identifier[log_merge_multiple_strings] ( identifier[country] )
keyword[else] :
identifier[entry] = identifier[entry] [ literal[int] ]
identifier[correspond] [ identifier[country] ]= identifier[entry]
keyword[for] identifier[country] keyword[in] identifier[original_countries] :
keyword[if] identifier[correspond] . identifier[get] ( identifier[country] ) keyword[is] keyword[None] :
keyword[if] identifier[missing_countries] keyword[is] keyword[True] :
identifier[correspond] [ identifier[country] ]= identifier[country]
keyword[elif] identifier[missing_countries] keyword[is] keyword[False] :
keyword[del] identifier[correspond] [ identifier[country] ]
keyword[else] :
identifier[correspond] [ identifier[country] ]= identifier[missing_countries]
keyword[if] identifier[log_missing_countries] :
identifier[log_missing_countries] ( identifier[country] )
keyword[if] identifier[as_dataframe] :
identifier[correspond] = identifier[pd] . identifier[DataFrame] . identifier[from_dict] (
identifier[correspond] , identifier[orient] = literal[string] ). identifier[reset_index] ()
identifier[correspond] . identifier[columns] =[ literal[string] , literal[string] ]
keyword[if] (( identifier[type] ( identifier[as_dataframe] ) keyword[is] identifier[str] ) keyword[and]
( identifier[as_dataframe] [ literal[int] ]. identifier[lower] ()== literal[string] )):
identifier[_co_list] = identifier[correspond] . identifier[original]
identifier[correspond] [ literal[string] ]= literal[int]
identifier[correspond] = identifier[correspond] . identifier[set_index] (
[ literal[string] , literal[string] ]). identifier[unstack] (). identifier[fillna] ( literal[int] )[ literal[string] ]
identifier[correspond] = identifier[correspond] . identifier[loc] [ identifier[_co_list] ]
keyword[return] identifier[correspond]
|
def agg_conc(original_countries,
             aggregates,
             missing_countries='test',
             merge_multiple_string='_&_',
             log_missing_countries=None,
             log_merge_multiple_strings=None,
             coco=None,
             as_dataframe='sparse',
             original_countries_class=None):
    """Build an aggregation concordance dict, vector or matrix.

    Parameters
    ----------
    original_countries: list or str
        List of countries to be aggregated; also accepts any valid column
        name of CountryConverter.data.
    aggregates: list of dict or str
        List of aggregation information. Each item is either a dict mapping
        names of 'original_countries' to aggregates, or a valid column name
        of CountryConverter.data. Aggregation happens in the order given in
        this parameter; a country already assigned to an aggregate is not
        re-assigned by subsequent aggregation information.
    missing_countries: str, boolean or None
        Entry to fill in for countries in 'original_countries' which do not
        appear in any of 'aggregates'.
        str: use the given name for all missing countries.
        True: use the name in original_countries for missing countries.
        False: skip these countries.
        None: use None for these countries.
    merge_multiple_string: str or None, optional
        If multiple correspondence entries are given in one of the
        aggregates, join them with the given string (default: '_&_').
        To skip these entries, pass None.
    log_missing_countries: function, optional
        Called for every country that is in 'original_countries' but
        missing in all 'aggregates'. For example, pass
        lambda x: logging.error('Country {} missing'.format(x))
        to log errors for such countries. Default: do nothing.
    log_merge_multiple_strings: function, optional
        Function to call for logging multiple strings, see
        log_missing_countries. Default: do nothing.
    coco: instance of CountryConverter, optional
        CountryConverter instance used for the conversion. Pass a custom
        one if additional data is needed in addition to the custom country
        converter file. If None (default), the bare CountryConverter is
        used.
    as_dataframe: boolean or str, optional
        If False, output as OrderedDict. If True or str, output as pandas
        DataFrame. If a str starting with 'f' (e.g. 'full'), output as a
        full 0/1 concordance matrix; otherwise only two columns with the
        original and aggregated names are returned.
    original_countries_class: str, optional
        Valid column name of CountryConverter.data. This parameter is
        needed if a list of countries is passed to 'original_countries' and
        strings corresponding to data in CountryConverter.data are used
        subsequently. Can be omitted otherwise.

    Returns
    -------
    OrderedDict or DataFrame (defined by 'as_dataframe')
    """
    if coco is None:
        coco = CountryConverter()
    if isinstance(original_countries, str):
        # A column name was given - expand it to the actual country list.
        original_countries_class = original_countries
        original_countries = coco.data[original_countries].values
    else:
        original_countries_class = (original_countries_class or
                                    coco._get_input_format_from_name(
                                        original_countries[0]))
    if not isinstance(aggregates, list):
        aggregates = [aggregates]
    # Preserve the input order; values start as None (= not yet assigned).
    correspond = OrderedDict.fromkeys(original_countries)
    for agg in aggregates:
        if isinstance(agg, str):
            agg = coco.get_correspondance_dict(original_countries_class,
                                               agg)
        for country in original_countries:
            # Earlier aggregates take precedence - only fill empty slots.
            if correspond.get(country) is None:
                entry = agg.get(country)
                if isinstance(entry, list):
                    if len(entry) > 1:
                        if merge_multiple_string:
                            entry = merge_multiple_string.join(
                                str(e) for e in entry)
                        else:
                            entry = None
                            if log_merge_multiple_strings:
                                log_merge_multiple_strings(country)
                    else:
                        entry = entry[0]
                correspond[country] = entry
    # Second pass: fill in countries not covered by any aggregate.
    for country in original_countries:
        if correspond.get(country) is None:
            if missing_countries is True:
                correspond[country] = country
            elif missing_countries is False:
                del correspond[country]
            else:
                correspond[country] = missing_countries
            if log_missing_countries:
                log_missing_countries(country)
    if as_dataframe:
        correspond = pd.DataFrame.from_dict(
            correspond, orient='index').reset_index()
        correspond.columns = ['original', 'aggregated']
        if isinstance(as_dataframe, str) and as_dataframe[0].lower() == 'f':
            # Pivot into a full 0/1 matrix (original x aggregated) and
            # restore the original row order, which unstack sorts away.
            _co_list = correspond.original
            correspond['val'] = 1
            correspond = correspond.set_index(
                ['original', 'aggregated']).unstack().fillna(0)['val']
            correspond = correspond.loc[_co_list]
    return correspond
|
def _init_dbus(self):
    """Resolve the device id and bind its D-Bus proxy object.

    Returns True when the proxy was obtained, False when no device id
    could be determined or the bus lookup failed.
    """
    bus = SessionBus()
    if self.device_id is None:
        # Lazily discover the device id on first use.
        self.device_id = self._get_device_id(bus)
        if self.device_id is None:
            return False
    try:
        self._dev = bus.get(SERVICE_BUS,
                            "%s/%s" % (DEVICE_PATH, self.device_id))
    except Exception:
        # Proxy lookup failed (device vanished, service down, ...).
        return False
    return True
|
def function[_init_dbus, parameter[self]]:
constant[
Get the device id
]
variable[_bus] assign[=] call[name[SessionBus], parameter[]]
if compare[name[self].device_id is constant[None]] begin[:]
name[self].device_id assign[=] call[name[self]._get_device_id, parameter[name[_bus]]]
if compare[name[self].device_id is constant[None]] begin[:]
return[constant[False]]
<ast.Try object at 0x7da18bc72770>
return[constant[True]]
|
keyword[def] identifier[_init_dbus] ( identifier[self] ):
literal[string]
identifier[_bus] = identifier[SessionBus] ()
keyword[if] identifier[self] . identifier[device_id] keyword[is] keyword[None] :
identifier[self] . identifier[device_id] = identifier[self] . identifier[_get_device_id] ( identifier[_bus] )
keyword[if] identifier[self] . identifier[device_id] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[try] :
identifier[self] . identifier[_dev] = identifier[_bus] . identifier[get] ( identifier[SERVICE_BUS] , identifier[DEVICE_PATH] + literal[string] % identifier[self] . identifier[device_id] )
keyword[except] identifier[Exception] :
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def _init_dbus(self):
"""
Get the device id
"""
_bus = SessionBus()
if self.device_id is None:
self.device_id = self._get_device_id(_bus)
if self.device_id is None:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
try:
self._dev = _bus.get(SERVICE_BUS, DEVICE_PATH + '/%s' % self.device_id) # depends on [control=['try'], data=[]]
except Exception:
return False # depends on [control=['except'], data=[]]
return True
|
def clean(self):
    """Drop regularization and prior-information observations from the jco.

    No-op (apart from a log statement) when no pst object was supplied.
    """
    if self.pst_arg is None:
        # Nothing to clean without a control-file object; just note it.
        self.logger.statement("linear_analysis.clean(): not pst object")
        return
    has_prior_to_drop = (not self.pst.estimation) and self.pst.nprior > 0
    if has_prior_to_drop:
        self.drop_prior_information()
|
def function[clean, parameter[self]]:
constant[drop regularization and prior information observation from the jco
]
if compare[name[self].pst_arg is constant[None]] begin[:]
call[name[self].logger.statement, parameter[constant[linear_analysis.clean(): not pst object]]]
return[None]
if <ast.BoolOp object at 0x7da1b1d6c0d0> begin[:]
call[name[self].drop_prior_information, parameter[]]
|
keyword[def] identifier[clean] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[pst_arg] keyword[is] keyword[None] :
identifier[self] . identifier[logger] . identifier[statement] ( literal[string] )
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[pst] . identifier[estimation] keyword[and] identifier[self] . identifier[pst] . identifier[nprior] > literal[int] :
identifier[self] . identifier[drop_prior_information] ()
|
def clean(self):
"""drop regularization and prior information observation from the jco
"""
if self.pst_arg is None:
self.logger.statement('linear_analysis.clean(): not pst object')
return # depends on [control=['if'], data=[]]
if not self.pst.estimation and self.pst.nprior > 0:
self.drop_prior_information() # depends on [control=['if'], data=[]]
|
def cli(user, password, geometry, start, end, uuid, name, download, sentinel, producttype,
        instrument, cloud, footprints, path, query, url, order_by, limit):
    """Search for Sentinel products and, optionally, download all the results
    and/or create a geojson file with the search result footprints.
    Beyond your Copernicus Open Access Hub user and password, you must pass a geojson file
    containing the geometry of the area you want to search for or the UUIDs of the products. If you
    don't specify the start and end dates, it will search in the last 24 hours.
    """
    _set_logger_handler()
    # Credentials: CLI options first, then the machine's .netrc entry for `url`.
    if user is None or password is None:
        try:
            user, password = requests.utils.get_netrc_auth(url)
        except TypeError:
            # get_netrc_auth returned None (no netrc entry) and cannot be unpacked.
            pass
    if user is None or password is None:
        raise click.UsageError('Missing --user and --password. Please see docs '
                               'for environment variables and .netrc support.')
    api = SentinelAPI(user, password, url)
    # Build the search keyword arguments from the CLI options.
    search_kwargs = {}
    if sentinel and not (producttype or instrument):
        # platformname is redundant once a product type or instrument is given.
        search_kwargs["platformname"] = "Sentinel-" + sentinel
    if instrument and not producttype:
        # producttype already implies the instrument, so only set this alone.
        search_kwargs["instrumentshortname"] = instrument
    if producttype:
        search_kwargs["producttype"] = producttype
    if cloud:
        if sentinel not in ['2', '3']:
            logger.error('Cloud cover is only supported for Sentinel 2 and 3.')
            exit(1)
        # API expects a (min, max) cloud cover percentage range.
        search_kwargs["cloudcoverpercentage"] = (0, cloud)
    if query is not None:
        # Raw `key=value` pairs extend/override the keywords built above.
        search_kwargs.update((x.split('=') for x in query))
    if geometry is not None:
        search_kwargs['area'] = geojson_to_wkt(read_geojson(geometry))
    if uuid is not None:
        # UUID lookups bypass the search and fetch product metadata directly.
        uuid_list = [x.strip() for x in uuid]
        products = {}
        for productid in uuid_list:
            try:
                products[productid] = api.get_product_odata(productid)
            except SentinelAPIError as e:
                if 'Invalid key' in e.msg:
                    logger.error('No product with ID \'%s\' exists on server', productid)
                    exit(1)
                else:
                    raise
    elif name is not None:
        # Multiple names are combined into a single OR'ed identifier expression.
        search_kwargs["identifier"] = name[0] if len(name) == 1 else '(' + ' OR '.join(name) + ')'
        products = api.query(order_by=order_by, limit=limit, **search_kwargs)
    else:
        # NOTE(review): despite the docstring's "last 24 hours", the fallback
        # date window here is 1900-01-01 .. NOW -- confirm which is intended.
        start = start or "19000101"
        end = end or "NOW"
        products = api.query(date=(start, end),
                             order_by=order_by, limit=limit, **search_kwargs)
    if footprints is True:
        # Dump all result footprints into one GeoJSON file under `path`.
        footprints_geojson = api.to_geojson(products)
        with open(os.path.join(path, "search_footprints.geojson"), "w") as outfile:
            outfile.write(gj.dumps(footprints_geojson))
    if download is True:
        product_infos, triggered, failed_downloads = api.download_all(products, path)
        if len(failed_downloads) > 0:
            # Record ids/titles of failed or corrupt downloads for later retry.
            with open(os.path.join(path, "corrupt_scenes.txt"), "w") as outfile:
                for failed_id in failed_downloads:
                    outfile.write("%s : %s\n" % (failed_id, products[failed_id]['title']))
    else:
        # Dry run: only report what was found.
        for product_id, props in products.items():
            if uuid is None:
                logger.info('Product %s - %s', product_id, props['summary'])
            else:  # querying uuids has no summary key
                logger.info('Product %s - %s - %s MB', product_id, props['title'],
                            round(int(props['size']) / (1024. * 1024.), 2))
        if uuid is None:
            logger.info('---')
            logger.info('%s scenes found with a total size of %.2f GB',
                        len(products), api.get_products_size(products))
|
def function[cli, parameter[user, password, geometry, start, end, uuid, name, download, sentinel, producttype, instrument, cloud, footprints, path, query, url, order_by, limit]]:
constant[Search for Sentinel products and, optionally, download all the results
and/or create a geojson file with the search result footprints.
Beyond your Copernicus Open Access Hub user and password, you must pass a geojson file
containing the geometry of the area you want to search for or the UUIDs of the products. If you
don't specify the start and end dates, it will search in the last 24 hours.
]
call[name[_set_logger_handler], parameter[]]
if <ast.BoolOp object at 0x7da18f00d3c0> begin[:]
<ast.Try object at 0x7da18f00cca0>
if <ast.BoolOp object at 0x7da18f00e500> begin[:]
<ast.Raise object at 0x7da18f00fd00>
variable[api] assign[=] call[name[SentinelAPI], parameter[name[user], name[password], name[url]]]
variable[search_kwargs] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da18f00caf0> begin[:]
call[name[search_kwargs]][constant[platformname]] assign[=] binary_operation[constant[Sentinel-] + name[sentinel]]
if <ast.BoolOp object at 0x7da18f00f5e0> begin[:]
call[name[search_kwargs]][constant[instrumentshortname]] assign[=] name[instrument]
if name[producttype] begin[:]
call[name[search_kwargs]][constant[producttype]] assign[=] name[producttype]
if name[cloud] begin[:]
if compare[name[sentinel] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18f00c8e0>, <ast.Constant object at 0x7da18f00e230>]]] begin[:]
call[name[logger].error, parameter[constant[Cloud cover is only supported for Sentinel 2 and 3.]]]
call[name[exit], parameter[constant[1]]]
call[name[search_kwargs]][constant[cloudcoverpercentage]] assign[=] tuple[[<ast.Constant object at 0x7da18f00c190>, <ast.Name object at 0x7da18f00fe20>]]
if compare[name[query] is_not constant[None]] begin[:]
call[name[search_kwargs].update, parameter[<ast.GeneratorExp object at 0x7da18f00d810>]]
if compare[name[geometry] is_not constant[None]] begin[:]
call[name[search_kwargs]][constant[area]] assign[=] call[name[geojson_to_wkt], parameter[call[name[read_geojson], parameter[name[geometry]]]]]
if compare[name[uuid] is_not constant[None]] begin[:]
variable[uuid_list] assign[=] <ast.ListComp object at 0x7da18f00cac0>
variable[products] assign[=] dictionary[[], []]
for taget[name[productid]] in starred[name[uuid_list]] begin[:]
<ast.Try object at 0x7da18f00c2b0>
if compare[name[footprints] is constant[True]] begin[:]
variable[footprints_geojson] assign[=] call[name[api].to_geojson, parameter[name[products]]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[path], constant[search_footprints.geojson]]], constant[w]]] begin[:]
call[name[outfile].write, parameter[call[name[gj].dumps, parameter[name[footprints_geojson]]]]]
if compare[name[download] is constant[True]] begin[:]
<ast.Tuple object at 0x7da1b26aed70> assign[=] call[name[api].download_all, parameter[name[products], name[path]]]
if compare[call[name[len], parameter[name[failed_downloads]]] greater[>] constant[0]] begin[:]
with call[name[open], parameter[call[name[os].path.join, parameter[name[path], constant[corrupt_scenes.txt]]], constant[w]]] begin[:]
for taget[name[failed_id]] in starred[name[failed_downloads]] begin[:]
call[name[outfile].write, parameter[binary_operation[constant[%s : %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ae590>, <ast.Subscript object at 0x7da1b26ae2c0>]]]]]
|
keyword[def] identifier[cli] ( identifier[user] , identifier[password] , identifier[geometry] , identifier[start] , identifier[end] , identifier[uuid] , identifier[name] , identifier[download] , identifier[sentinel] , identifier[producttype] ,
identifier[instrument] , identifier[cloud] , identifier[footprints] , identifier[path] , identifier[query] , identifier[url] , identifier[order_by] , identifier[limit] ):
literal[string]
identifier[_set_logger_handler] ()
keyword[if] identifier[user] keyword[is] keyword[None] keyword[or] identifier[password] keyword[is] keyword[None] :
keyword[try] :
identifier[user] , identifier[password] = identifier[requests] . identifier[utils] . identifier[get_netrc_auth] ( identifier[url] )
keyword[except] identifier[TypeError] :
keyword[pass]
keyword[if] identifier[user] keyword[is] keyword[None] keyword[or] identifier[password] keyword[is] keyword[None] :
keyword[raise] identifier[click] . identifier[UsageError] ( literal[string]
literal[string] )
identifier[api] = identifier[SentinelAPI] ( identifier[user] , identifier[password] , identifier[url] )
identifier[search_kwargs] ={}
keyword[if] identifier[sentinel] keyword[and] keyword[not] ( identifier[producttype] keyword[or] identifier[instrument] ):
identifier[search_kwargs] [ literal[string] ]= literal[string] + identifier[sentinel]
keyword[if] identifier[instrument] keyword[and] keyword[not] identifier[producttype] :
identifier[search_kwargs] [ literal[string] ]= identifier[instrument]
keyword[if] identifier[producttype] :
identifier[search_kwargs] [ literal[string] ]= identifier[producttype]
keyword[if] identifier[cloud] :
keyword[if] identifier[sentinel] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
identifier[logger] . identifier[error] ( literal[string] )
identifier[exit] ( literal[int] )
identifier[search_kwargs] [ literal[string] ]=( literal[int] , identifier[cloud] )
keyword[if] identifier[query] keyword[is] keyword[not] keyword[None] :
identifier[search_kwargs] . identifier[update] (( identifier[x] . identifier[split] ( literal[string] ) keyword[for] identifier[x] keyword[in] identifier[query] ))
keyword[if] identifier[geometry] keyword[is] keyword[not] keyword[None] :
identifier[search_kwargs] [ literal[string] ]= identifier[geojson_to_wkt] ( identifier[read_geojson] ( identifier[geometry] ))
keyword[if] identifier[uuid] keyword[is] keyword[not] keyword[None] :
identifier[uuid_list] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[uuid] ]
identifier[products] ={}
keyword[for] identifier[productid] keyword[in] identifier[uuid_list] :
keyword[try] :
identifier[products] [ identifier[productid] ]= identifier[api] . identifier[get_product_odata] ( identifier[productid] )
keyword[except] identifier[SentinelAPIError] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[e] . identifier[msg] :
identifier[logger] . identifier[error] ( literal[string] , identifier[productid] )
identifier[exit] ( literal[int] )
keyword[else] :
keyword[raise]
keyword[elif] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[search_kwargs] [ literal[string] ]= identifier[name] [ literal[int] ] keyword[if] identifier[len] ( identifier[name] )== literal[int] keyword[else] literal[string] + literal[string] . identifier[join] ( identifier[name] )+ literal[string]
identifier[products] = identifier[api] . identifier[query] ( identifier[order_by] = identifier[order_by] , identifier[limit] = identifier[limit] ,** identifier[search_kwargs] )
keyword[else] :
identifier[start] = identifier[start] keyword[or] literal[string]
identifier[end] = identifier[end] keyword[or] literal[string]
identifier[products] = identifier[api] . identifier[query] ( identifier[date] =( identifier[start] , identifier[end] ),
identifier[order_by] = identifier[order_by] , identifier[limit] = identifier[limit] ,** identifier[search_kwargs] )
keyword[if] identifier[footprints] keyword[is] keyword[True] :
identifier[footprints_geojson] = identifier[api] . identifier[to_geojson] ( identifier[products] )
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] ), literal[string] ) keyword[as] identifier[outfile] :
identifier[outfile] . identifier[write] ( identifier[gj] . identifier[dumps] ( identifier[footprints_geojson] ))
keyword[if] identifier[download] keyword[is] keyword[True] :
identifier[product_infos] , identifier[triggered] , identifier[failed_downloads] = identifier[api] . identifier[download_all] ( identifier[products] , identifier[path] )
keyword[if] identifier[len] ( identifier[failed_downloads] )> literal[int] :
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] ), literal[string] ) keyword[as] identifier[outfile] :
keyword[for] identifier[failed_id] keyword[in] identifier[failed_downloads] :
identifier[outfile] . identifier[write] ( literal[string] %( identifier[failed_id] , identifier[products] [ identifier[failed_id] ][ literal[string] ]))
keyword[else] :
keyword[for] identifier[product_id] , identifier[props] keyword[in] identifier[products] . identifier[items] ():
keyword[if] identifier[uuid] keyword[is] keyword[None] :
identifier[logger] . identifier[info] ( literal[string] , identifier[product_id] , identifier[props] [ literal[string] ])
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] , identifier[product_id] , identifier[props] [ literal[string] ],
identifier[round] ( identifier[int] ( identifier[props] [ literal[string] ])/( literal[int] * literal[int] ), literal[int] ))
keyword[if] identifier[uuid] keyword[is] keyword[None] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] ,
identifier[len] ( identifier[products] ), identifier[api] . identifier[get_products_size] ( identifier[products] ))
|
def cli(user, password, geometry, start, end, uuid, name, download, sentinel, producttype, instrument, cloud, footprints, path, query, url, order_by, limit):
"""Search for Sentinel products and, optionally, download all the results
and/or create a geojson file with the search result footprints.
Beyond your Copernicus Open Access Hub user and password, you must pass a geojson file
containing the geometry of the area you want to search for or the UUIDs of the products. If you
don't specify the start and end dates, it will search in the last 24 hours.
"""
_set_logger_handler()
if user is None or password is None:
try:
(user, password) = requests.utils.get_netrc_auth(url) # depends on [control=['try'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if user is None or password is None:
raise click.UsageError('Missing --user and --password. Please see docs for environment variables and .netrc support.') # depends on [control=['if'], data=[]]
api = SentinelAPI(user, password, url)
search_kwargs = {}
if sentinel and (not (producttype or instrument)):
search_kwargs['platformname'] = 'Sentinel-' + sentinel # depends on [control=['if'], data=[]]
if instrument and (not producttype):
search_kwargs['instrumentshortname'] = instrument # depends on [control=['if'], data=[]]
if producttype:
search_kwargs['producttype'] = producttype # depends on [control=['if'], data=[]]
if cloud:
if sentinel not in ['2', '3']:
logger.error('Cloud cover is only supported for Sentinel 2 and 3.')
exit(1) # depends on [control=['if'], data=[]]
search_kwargs['cloudcoverpercentage'] = (0, cloud) # depends on [control=['if'], data=[]]
if query is not None:
search_kwargs.update((x.split('=') for x in query)) # depends on [control=['if'], data=['query']]
if geometry is not None:
search_kwargs['area'] = geojson_to_wkt(read_geojson(geometry)) # depends on [control=['if'], data=['geometry']]
if uuid is not None:
uuid_list = [x.strip() for x in uuid]
products = {}
for productid in uuid_list:
try:
products[productid] = api.get_product_odata(productid) # depends on [control=['try'], data=[]]
except SentinelAPIError as e:
if 'Invalid key' in e.msg:
logger.error("No product with ID '%s' exists on server", productid)
exit(1) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['productid']] # depends on [control=['if'], data=['uuid']]
elif name is not None:
search_kwargs['identifier'] = name[0] if len(name) == 1 else '(' + ' OR '.join(name) + ')'
products = api.query(order_by=order_by, limit=limit, **search_kwargs) # depends on [control=['if'], data=['name']]
else:
start = start or '19000101'
end = end or 'NOW'
products = api.query(date=(start, end), order_by=order_by, limit=limit, **search_kwargs)
if footprints is True:
footprints_geojson = api.to_geojson(products)
with open(os.path.join(path, 'search_footprints.geojson'), 'w') as outfile:
outfile.write(gj.dumps(footprints_geojson)) # depends on [control=['with'], data=['outfile']] # depends on [control=['if'], data=[]]
if download is True:
(product_infos, triggered, failed_downloads) = api.download_all(products, path)
if len(failed_downloads) > 0:
with open(os.path.join(path, 'corrupt_scenes.txt'), 'w') as outfile:
for failed_id in failed_downloads:
outfile.write('%s : %s\n' % (failed_id, products[failed_id]['title'])) # depends on [control=['for'], data=['failed_id']] # depends on [control=['with'], data=['outfile']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
for (product_id, props) in products.items():
if uuid is None:
logger.info('Product %s - %s', product_id, props['summary']) # depends on [control=['if'], data=[]]
else: # querying uuids has no summary key
logger.info('Product %s - %s - %s MB', product_id, props['title'], round(int(props['size']) / (1024.0 * 1024.0), 2)) # depends on [control=['for'], data=[]]
if uuid is None:
logger.info('---')
logger.info('%s scenes found with a total size of %.2f GB', len(products), api.get_products_size(products)) # depends on [control=['if'], data=[]]
|
def _create_activity2(self, parent, name, activity_type=ActivityType.TASK):
    """Create a new activity.

    .. important::
        This function creates activities for KE-chain versions later than 2.9.0-135
        In effect where the module 'wim' has version '>=2.0.0'.
        The version of 'wim' in KE-chain can be found in the property :attr:`Client.app_versions`

    In WIM2 the type of the activity is called activity_type.

    :param parent: parent under which to create the activity
    :type parent: basestring or :class:`models.Activity2`
    :param name: new activity name
    :type name: basestring
    :param activity_type: type of activity: TASK (default) or PROCESS
    :type activity_type: basestring
    :return: the created :class:`models.Activity2`
    :raises APIError: When the object could not be created
    :raises IllegalArgumentError: When an incorrect activitytype or parent is provided
    """
    # WIM1 used `activity_class`, WIM2 uses `activity_type`; refuse to run against old backends.
    if self.match_app_version(label='wim', version='<2.0.0', default=True):
        raise APIError('This method is only compatible with versions of KE-chain where the internal `wim` module '
                       'has a version >=2.0.0. Use the `Client.create_activity()` method.')

    if activity_type and activity_type not in ActivityType.values():
        raise IllegalArgumentError("Please provide accepted activity_type (provided:{} accepted:{})".
                                   format(activity_type, ActivityType.values()))

    # Accept either an activity object (use its id) or a plain UUID string; the
    # original code had a dead `parent = parent` branch for the UUID case.
    if isinstance(parent, (Activity, Activity2)):
        parent = parent.id
    elif not is_uuid(parent):
        raise IllegalArgumentError("Please provide either an activity object or a UUID")

    data = {
        "name": name,
        "parent_id": parent,
        "activity_type": activity_type
    }

    response = self._request('POST', self._build_url('activities'), data=data,
                             params=API_EXTRA_PARAMS['activities'])

    if response.status_code != requests.codes.created:  # pragma: no cover
        raise APIError("Could not create activity")

    data = response.json()
    return Activity2(data['results'][0], client=self)
|
def function[_create_activity2, parameter[self, parent, name, activity_type]]:
constant[Create a new activity.
.. important::
This function creates activities for KE-chain versions later than 2.9.0-135
In effect where the module 'wim' has version '>=2.0.0'.
The version of 'wim' in KE-chain can be found in the property :attr:`Client.app_versions`
In WIM2 the type of the activity is called activity_type
:param parent: parent under which to create the activity
:type parent: basestring or :class:`models.Activity2`
:param name: new activity name
:type name: basestring
:param activity_type: type of activity: TASK (default) or PROCESS
:type activity_type: basestring
:return: the created :class:`models.Activity2`
:raises APIError: When the object could not be created
:raises IllegalArgumentError: When an incorrect activitytype or parent is provided
]
if call[name[self].match_app_version, parameter[]] begin[:]
<ast.Raise object at 0x7da1b24845e0>
if <ast.BoolOp object at 0x7da1b24846d0> begin[:]
<ast.Raise object at 0x7da1b2485b10>
if call[name[isinstance], parameter[name[parent], tuple[[<ast.Name object at 0x7da1b2485960>, <ast.Name object at 0x7da1b24874c0>]]]] begin[:]
variable[parent] assign[=] name[parent].id
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b2487c40>, <ast.Constant object at 0x7da1b2486650>, <ast.Constant object at 0x7da1b2486bc0>], [<ast.Name object at 0x7da1b24850c0>, <ast.Name object at 0x7da1b2486560>, <ast.Name object at 0x7da20c6a81f0>]]
variable[response] assign[=] call[name[self]._request, parameter[constant[POST], call[name[self]._build_url, parameter[constant[activities]]]]]
if compare[name[response].status_code not_equal[!=] name[requests].codes.created] begin[:]
<ast.Raise object at 0x7da20c6ab4c0>
variable[data] assign[=] call[name[response].json, parameter[]]
return[call[name[Activity2], parameter[call[call[name[data]][constant[results]]][constant[0]]]]]
|
keyword[def] identifier[_create_activity2] ( identifier[self] , identifier[parent] , identifier[name] , identifier[activity_type] = identifier[ActivityType] . identifier[TASK] ):
literal[string]
keyword[if] identifier[self] . identifier[match_app_version] ( identifier[label] = literal[string] , identifier[version] = literal[string] , identifier[default] = keyword[True] ):
keyword[raise] identifier[APIError] ( literal[string]
literal[string] )
keyword[if] identifier[activity_type] keyword[and] identifier[activity_type] keyword[not] keyword[in] identifier[ActivityType] . identifier[values] ():
keyword[raise] identifier[IllegalArgumentError] ( literal[string] .
identifier[format] ( identifier[activity_type] , identifier[ActivityType] . identifier[values] ()))
keyword[if] identifier[isinstance] ( identifier[parent] ,( identifier[Activity] , identifier[Activity2] )):
identifier[parent] = identifier[parent] . identifier[id]
keyword[elif] identifier[is_uuid] ( identifier[parent] ):
identifier[parent] = identifier[parent]
keyword[else] :
keyword[raise] identifier[IllegalArgumentError] ( literal[string] )
identifier[data] ={
literal[string] : identifier[name] ,
literal[string] : identifier[parent] ,
literal[string] : identifier[activity_type]
}
identifier[response] = identifier[self] . identifier[_request] ( literal[string] , identifier[self] . identifier[_build_url] ( literal[string] ), identifier[data] = identifier[data] ,
identifier[params] = identifier[API_EXTRA_PARAMS] [ literal[string] ])
keyword[if] identifier[response] . identifier[status_code] != identifier[requests] . identifier[codes] . identifier[created] :
keyword[raise] identifier[APIError] ( literal[string] )
identifier[data] = identifier[response] . identifier[json] ()
keyword[return] identifier[Activity2] ( identifier[data] [ literal[string] ][ literal[int] ], identifier[client] = identifier[self] )
|
def _create_activity2(self, parent, name, activity_type=ActivityType.TASK):
"""Create a new activity.
.. important::
This function creates activities for KE-chain versions later than 2.9.0-135
In effect where the module 'wim' has version '>=2.0.0'.
The version of 'wim' in KE-chain can be found in the property :attr:`Client.app_versions`
In WIM2 the type of the activity is called activity_type
:param parent: parent under which to create the activity
:type parent: basestring or :class:`models.Activity2`
:param name: new activity name
:type name: basestring
:param activity_type: type of activity: TASK (default) or PROCESS
:type activity_type: basestring
:return: the created :class:`models.Activity2`
:raises APIError: When the object could not be created
:raises IllegalArgumentError: When an incorrect activitytype or parent is provided
"""
# WIM1: activity_class, WIM2: activity_type
if self.match_app_version(label='wim', version='<2.0.0', default=True):
raise APIError('This method is only compatible with versions of KE-chain where the internal `wim` module has a version >=2.0.0. Use the `Client.create_activity()` method.') # depends on [control=['if'], data=[]]
if activity_type and activity_type not in ActivityType.values():
raise IllegalArgumentError('Please provide accepted activity_type (provided:{} accepted:{})'.format(activity_type, ActivityType.values())) # depends on [control=['if'], data=[]]
if isinstance(parent, (Activity, Activity2)):
parent = parent.id # depends on [control=['if'], data=[]]
elif is_uuid(parent):
parent = parent # depends on [control=['if'], data=[]]
else:
raise IllegalArgumentError('Please provide either an activity object or a UUID')
data = {'name': name, 'parent_id': parent, 'activity_type': activity_type}
response = self._request('POST', self._build_url('activities'), data=data, params=API_EXTRA_PARAMS['activities'])
if response.status_code != requests.codes.created: # pragma: no cover
raise APIError('Could not create activity') # depends on [control=['if'], data=[]]
data = response.json()
return Activity2(data['results'][0], client=self)
|
def set_location(self, obj, cursor):
    """Record the source location of *cursor* onto *obj*.

    Stores a ``(file name, line number)`` pair as ``obj.location``; this
    pair is also used later to order code generation deterministically.
    Nothing is stored when the cursor has no location or no source file
    (e.g. builtin or synthesized cursors).
    """
    location = getattr(cursor, 'location', None)
    if location is None or location.file is None:
        return
    obj.location = (location.file.name, location.line)
|
def function[set_location, parameter[self, obj, cursor]]:
constant[ Location is also used for codegeneration ordering.]
if <ast.BoolOp object at 0x7da1b2345f90> begin[:]
name[obj].location assign[=] tuple[[<ast.Attribute object at 0x7da1b2346170>, <ast.Attribute object at 0x7da1b23456c0>]]
return[None]
|
keyword[def] identifier[set_location] ( identifier[self] , identifier[obj] , identifier[cursor] ):
literal[string]
keyword[if] ( identifier[hasattr] ( identifier[cursor] , literal[string] ) keyword[and] identifier[cursor] . identifier[location] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[cursor] . identifier[location] . identifier[file] keyword[is] keyword[not] keyword[None] ):
identifier[obj] . identifier[location] =( identifier[cursor] . identifier[location] . identifier[file] . identifier[name] , identifier[cursor] . identifier[location] . identifier[line] )
keyword[return]
|
def set_location(self, obj, cursor):
""" Location is also used for codegeneration ordering."""
if hasattr(cursor, 'location') and cursor.location is not None and (cursor.location.file is not None):
obj.location = (cursor.location.file.name, cursor.location.line) # depends on [control=['if'], data=[]]
return
|
def _rm_compute_leading_space_alig(space_pres_split, seq):
"""
count the number of characters that precede the sequence in a repeatmasker
alignment line. E.g. in the following line:
' chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41'
the answer would be 24.
:param space_pres_split: the alignment line, split into tokens around spaces,
but with the spaces conserved as tokens.
:param seq: the sequence token.
"""
c = 0
for i in range(0, len(space_pres_split)):
if space_pres_split[i] == seq:
break
c += len(space_pres_split[i])
return c
|
def function[_rm_compute_leading_space_alig, parameter[space_pres_split, seq]]:
constant[
count the number of characters that precede the sequence in a repeatmasker
alignment line. E.g. in the following line:
' chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41'
the answer would be 24.
:param space_pres_split: the alignment line, split into tokens around spaces,
but with the spaces conserved as tokens.
:param seq: the sequence token.
]
variable[c] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[space_pres_split]]]]]] begin[:]
if compare[call[name[space_pres_split]][name[i]] equal[==] name[seq]] begin[:]
break
<ast.AugAssign object at 0x7da1b15b55a0>
return[name[c]]
|
keyword[def] identifier[_rm_compute_leading_space_alig] ( identifier[space_pres_split] , identifier[seq] ):
literal[string]
identifier[c] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[space_pres_split] )):
keyword[if] identifier[space_pres_split] [ identifier[i] ]== identifier[seq] :
keyword[break]
identifier[c] += identifier[len] ( identifier[space_pres_split] [ identifier[i] ])
keyword[return] identifier[c]
|
def _rm_compute_leading_space_alig(space_pres_split, seq):
"""
count the number of characters that precede the sequence in a repeatmasker
alignment line. E.g. in the following line:
' chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41'
the answer would be 24.
:param space_pres_split: the alignment line, split into tokens around spaces,
but with the spaces conserved as tokens.
:param seq: the sequence token.
"""
c = 0
for i in range(0, len(space_pres_split)):
if space_pres_split[i] == seq:
break # depends on [control=['if'], data=[]]
c += len(space_pres_split[i]) # depends on [control=['for'], data=['i']]
return c
|
def field_exists(self, well_x, well_y, field_x, field_y):
    """Return True if the given field exists in the ScanFieldArray.

    Delegates to ``self.field`` and treats a ``None`` result as
    "does not exist". Uses identity comparison (``is not None``)
    rather than ``!= None``, per PEP 8.
    """
    return self.field(well_x, well_y, field_x, field_y) is not None
|
def function[field_exists, parameter[self, well_x, well_y, field_x, field_y]]:
constant[Check if field exists ScanFieldArray.]
return[compare[call[name[self].field, parameter[name[well_x], name[well_y], name[field_x], name[field_y]]] not_equal[!=] constant[None]]]
|
keyword[def] identifier[field_exists] ( identifier[self] , identifier[well_x] , identifier[well_y] , identifier[field_x] , identifier[field_y] ):
literal[string]
keyword[return] identifier[self] . identifier[field] ( identifier[well_x] , identifier[well_y] , identifier[field_x] , identifier[field_y] )!= keyword[None]
|
def field_exists(self, well_x, well_y, field_x, field_y):
"""Check if field exists ScanFieldArray."""
return self.field(well_x, well_y, field_x, field_y) != None
|
def contains(self, key):
    "Exact matching: True when *key* leads to a node that stores a value."
    node = self.follow_bytes(key, self.ROOT)
    # A dead-end traversal (None) means the key is absent; otherwise the
    # node must actually terminate a stored key, not just a prefix.
    return node is not None and self.has_value(node)
|
def function[contains, parameter[self, key]]:
constant[Exact matching.]
variable[index] assign[=] call[name[self].follow_bytes, parameter[name[key], name[self].ROOT]]
if compare[name[index] is constant[None]] begin[:]
return[constant[False]]
return[call[name[self].has_value, parameter[name[index]]]]
|
keyword[def] identifier[contains] ( identifier[self] , identifier[key] ):
literal[string]
identifier[index] = identifier[self] . identifier[follow_bytes] ( identifier[key] , identifier[self] . identifier[ROOT] )
keyword[if] identifier[index] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[return] identifier[self] . identifier[has_value] ( identifier[index] )
|
def contains(self, key):
"""Exact matching."""
index = self.follow_bytes(key, self.ROOT)
if index is None:
return False # depends on [control=['if'], data=[]]
return self.has_value(index)
|
def open_file(path_arg, mode='r'):
    """Decorator to ensure clean opening and closing of files.
    Parameters
    ----------
    path_arg : int
        Location of the path argument in args.  Even if the argument is a
        named positional argument (with a default value), you must specify its
        index as a positional argument.
    mode : str
        String for opening mode.
    Returns
    -------
    _open_file : function
        Function which cleanly executes the io.
    Raises
    ------
    nx.NetworkXError
        At call time, if the required path argument is missing from the
        call, or if ``path_arg`` does not match the decorated function's
        signature (a decorator-specification error).
    Examples
    --------
    Decorate functions like this::
       @open_file(0,'r')
       def read_function(pathname):
           pass
       @open_file(1,'w')
       def write_function(G,pathname):
           pass
       @open_file(1,'w')
       def write_function(G, pathname='graph.dot')
           pass
       @open_file('path', 'w+')
       def another_function(arg, **kwargs):
           path = kwargs['path']
           pass
    """
    # Note that this decorator solves the problem when a path argument is
    # specified as a string, but it does not handle the situation when the
    # function wants to accept a default of None (and then handle it).
    # Here is an example:
    #
    # @open_file('path')
    # def some_function(arg1, arg2, path=None):
    #    if path is None:
    #        fobj = tempfile.NamedTemporaryFile(delete=False)
    #        close_fobj = True
    #    else:
    #        # `path` could have been a string or file object or something
    #        # similar. In any event, the decorator has given us a file object
    #        # and it will close it for us, if it should.
    #        fobj = path
    #        close_fobj = False
    #
    #    try:
    #        fobj.write('blah')
    #    finally:
    #        if close_fobj:
    #            fobj.close()
    #
    # Normally, we'd want to use "with" to ensure that fobj gets closed.
    # However, recall that the decorator will make `path` a file object for
    # us, and using "with" would undesirably close that file object. Instead,
    # you use a try block, as shown above. When we exit the function, fobj will
    # be closed, if it should be, by the decorator.
    @decorator
    def _open_file(func, *args, **kwargs):
        # Note that since we have used @decorator, *args, and **kwargs have
        # already been resolved to match the function signature of func. This
        # means default values have been propagated. For example,  the function
        # func(x, y, a=1, b=2, **kwargs) if called as func(0,1,b=5,c=10) would
        # have args=(0,1,1,5) and kwargs={'c':10}.
        # First we parse the arguments of the decorator. The path_arg could
        # be an positional argument or a keyword argument.  Even if it is
        try:
            # path_arg is a required positional argument
            # This works precisely because we are using @decorator
            path = args[path_arg]
        except TypeError:
            # path_arg is a keyword argument. It is "required" in the sense
            # that it must exist, according to the decorator specification,
            # It can exist in `kwargs` by a developer specified default value
            # or it could have been explicitly set by the user.
            try:
                path = kwargs[path_arg]
            except KeyError:
                # Could not find the keyword. Thus, no default was specified
                # in the function signature and the user did not provide it.
                msg = 'Missing required keyword argument: {0}'
                raise nx.NetworkXError(msg.format(path_arg))
            else:
                is_kwarg = True
        except IndexError:
            # A "required" argument was missing. This can only happen if
            # the decorator of the function was incorrectly specified.
            # So this probably is not a user error, but a developer error.
            msg = "path_arg of open_file decorator is incorrect"
            raise nx.NetworkXError(msg)
        else:
            is_kwarg = False
        # Now we have the path_arg. There are two types of input to consider:
        #   1) string representing a path that should be opened
        #   2) an already opened file object
        if is_string_like(path):
            # Pick an opener by file extension (e.g. .gz/.bz2 open compressed).
            ext = splitext(path)[1]
            fobj = _dispatch_dict[ext](path, mode=mode)
            close_fobj = True
        elif hasattr(path, 'read'):
            # path is already a file-like object
            fobj = path
            close_fobj = False
        else:
            # could be None, in which case the algorithm will deal with it
            fobj = path
            close_fobj = False
        # Insert file object into args or kwargs.
        if is_kwarg:
            new_args = args
            kwargs[path_arg] = fobj
        else:
            # args is a tuple, so we must convert to list before modifying it.
            new_args = list(args)
            new_args[path_arg] = fobj
        # Finally, we call the original function, making sure to close the fobj.
        try:
            result = func(*new_args, **kwargs)
        finally:
            # Only close files the decorator itself opened; caller-supplied
            # file objects remain open for the caller to manage.
            if close_fobj:
                fobj.close()
        return result
    return _open_file
|
def function[open_file, parameter[path_arg, mode]]:
constant[Decorator to ensure clean opening and closing of files.
Parameters
----------
path_arg : int
Location of the path argument in args. Even if the argument is a
named positional argument (with a default value), you must specify its
index as a positional argument.
mode : str
String for opening mode.
Returns
-------
_open_file : function
Function which cleanly executes the io.
Examples
--------
Decorate functions like this::
@open_file(0,'r')
def read_function(pathname):
pass
@open_file(1,'w')
def write_function(G,pathname):
pass
@open_file(1,'w')
def write_function(G, pathname='graph.dot')
pass
@open_file('path', 'w+')
def another_function(arg, **kwargs):
path = kwargs['path']
pass
]
def function[_open_file, parameter[func]]:
<ast.Try object at 0x7da1b0414820>
if call[name[is_string_like], parameter[name[path]]] begin[:]
variable[ext] assign[=] call[call[name[splitext], parameter[name[path]]]][constant[1]]
variable[fobj] assign[=] call[call[name[_dispatch_dict]][name[ext]], parameter[name[path]]]
variable[close_fobj] assign[=] constant[True]
if name[is_kwarg] begin[:]
variable[new_args] assign[=] name[args]
call[name[kwargs]][name[path_arg]] assign[=] name[fobj]
<ast.Try object at 0x7da1b0467460>
return[name[result]]
return[name[_open_file]]
|
keyword[def] identifier[open_file] ( identifier[path_arg] , identifier[mode] = literal[string] ):
literal[string]
@ identifier[decorator]
keyword[def] identifier[_open_file] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
identifier[path] = identifier[args] [ identifier[path_arg] ]
keyword[except] identifier[TypeError] :
keyword[try] :
identifier[path] = identifier[kwargs] [ identifier[path_arg] ]
keyword[except] identifier[KeyError] :
identifier[msg] = literal[string]
keyword[raise] identifier[nx] . identifier[NetworkXError] ( identifier[msg] . identifier[format] ( identifier[path_arg] ))
keyword[else] :
identifier[is_kwarg] = keyword[True]
keyword[except] identifier[IndexError] :
identifier[msg] = literal[string]
keyword[raise] identifier[nx] . identifier[NetworkXError] ( identifier[msg] )
keyword[else] :
identifier[is_kwarg] = keyword[False]
keyword[if] identifier[is_string_like] ( identifier[path] ):
identifier[ext] = identifier[splitext] ( identifier[path] )[ literal[int] ]
identifier[fobj] = identifier[_dispatch_dict] [ identifier[ext] ]( identifier[path] , identifier[mode] = identifier[mode] )
identifier[close_fobj] = keyword[True]
keyword[elif] identifier[hasattr] ( identifier[path] , literal[string] ):
identifier[fobj] = identifier[path]
identifier[close_fobj] = keyword[False]
keyword[else] :
identifier[fobj] = identifier[path]
identifier[close_fobj] = keyword[False]
keyword[if] identifier[is_kwarg] :
identifier[new_args] = identifier[args]
identifier[kwargs] [ identifier[path_arg] ]= identifier[fobj]
keyword[else] :
identifier[new_args] = identifier[list] ( identifier[args] )
identifier[new_args] [ identifier[path_arg] ]= identifier[fobj]
keyword[try] :
identifier[result] = identifier[func] (* identifier[new_args] ,** identifier[kwargs] )
keyword[finally] :
keyword[if] identifier[close_fobj] :
identifier[fobj] . identifier[close] ()
keyword[return] identifier[result]
keyword[return] identifier[_open_file]
|
def open_file(path_arg, mode='r'):
"""Decorator to ensure clean opening and closing of files.
Parameters
----------
path_arg : int
Location of the path argument in args. Even if the argument is a
named positional argument (with a default value), you must specify its
index as a positional argument.
mode : str
String for opening mode.
Returns
-------
_open_file : function
Function which cleanly executes the io.
Examples
--------
Decorate functions like this::
@open_file(0,'r')
def read_function(pathname):
pass
@open_file(1,'w')
def write_function(G,pathname):
pass
@open_file(1,'w')
def write_function(G, pathname='graph.dot')
pass
@open_file('path', 'w+')
def another_function(arg, **kwargs):
path = kwargs['path']
pass
"""
# Note that this decorator solves the problem when a path argument is
# specified as a string, but it does not handle the situation when the
# function wants to accept a default of None (and then handle it).
# Here is an example:
#
# @open_file('path')
# def some_function(arg1, arg2, path=None):
# if path is None:
# fobj = tempfile.NamedTemporaryFile(delete=False)
# close_fobj = True
# else:
# # `path` could have been a string or file object or something
# # similar. In any event, the decorator has given us a file object
# # and it will close it for us, if it should.
# fobj = path
# close_fobj = False
#
# try:
# fobj.write('blah')
# finally:
# if close_fobj:
# fobj.close()
#
# Normally, we'd want to use "with" to ensure that fobj gets closed.
# However, recall that the decorator will make `path` a file object for
# us, and using "with" would undesirably close that file object. Instead,
# you use a try block, as shown above. When we exit the function, fobj will
# be closed, if it should be, by the decorator.
@decorator
def _open_file(func, *args, **kwargs):
# Note that since we have used @decorator, *args, and **kwargs have
# already been resolved to match the function signature of func. This
# means default values have been propagated. For example, the function
# func(x, y, a=1, b=2, **kwargs) if called as func(0,1,b=5,c=10) would
# have args=(0,1,1,5) and kwargs={'c':10}.
# First we parse the arguments of the decorator. The path_arg could
# be an positional argument or a keyword argument. Even if it is
try:
# path_arg is a required positional argument
# This works precisely because we are using @decorator
path = args[path_arg] # depends on [control=['try'], data=[]]
except TypeError:
# path_arg is a keyword argument. It is "required" in the sense
# that it must exist, according to the decorator specification,
# It can exist in `kwargs` by a developer specified default value
# or it could have been explicitly set by the user.
try:
path = kwargs[path_arg] # depends on [control=['try'], data=[]]
except KeyError:
# Could not find the keyword. Thus, no default was specified
# in the function signature and the user did not provide it.
msg = 'Missing required keyword argument: {0}'
raise nx.NetworkXError(msg.format(path_arg)) # depends on [control=['except'], data=[]]
else:
is_kwarg = True # depends on [control=['except'], data=[]]
except IndexError:
# A "required" argument was missing. This can only happen if
# the decorator of the function was incorrectly specified.
# So this probably is not a user error, but a developer error.
msg = 'path_arg of open_file decorator is incorrect'
raise nx.NetworkXError(msg) # depends on [control=['except'], data=[]]
else:
is_kwarg = False
# Now we have the path_arg. There are two types of input to consider:
# 1) string representing a path that should be opened
# 2) an already opened file object
if is_string_like(path):
ext = splitext(path)[1]
fobj = _dispatch_dict[ext](path, mode=mode)
close_fobj = True # depends on [control=['if'], data=[]]
elif hasattr(path, 'read'):
# path is already a file-like object
fobj = path
close_fobj = False # depends on [control=['if'], data=[]]
else:
# could be None, in which case the algorithm will deal with it
fobj = path
close_fobj = False
# Insert file object into args or kwargs.
if is_kwarg:
new_args = args
kwargs[path_arg] = fobj # depends on [control=['if'], data=[]]
else:
# args is a tuple, so we must convert to list before modifying it.
new_args = list(args)
new_args[path_arg] = fobj
# Finally, we call the original function, making sure to close the fobj.
try:
result = func(*new_args, **kwargs) # depends on [control=['try'], data=[]]
finally:
if close_fobj:
fobj.close() # depends on [control=['if'], data=[]]
return result
return _open_file
|
def _resolve_capabilities(pkgs, refresh=False, **kwargs):
'''
Resolve capabilities in ``pkgs`` and exchange them with real package
names, when the result is distinct.
This feature can be turned on while setting the paramter
``resolve_capabilities`` to True.
Return the input dictionary with replaced capability names and as
second return value a bool which say if a refresh need to be run.
In case of ``resolve_capabilities`` is False (disabled) or not
supported by the implementation the input is returned unchanged.
'''
if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
return pkgs, refresh
ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
return ret, False
|
def function[_resolve_capabilities, parameter[pkgs, refresh]]:
constant[
Resolve capabilities in ``pkgs`` and exchange them with real package
names, when the result is distinct.
This feature can be turned on while setting the paramter
``resolve_capabilities`` to True.
Return the input dictionary with replaced capability names and as
second return value a bool which say if a refresh need to be run.
In case of ``resolve_capabilities`` is False (disabled) or not
supported by the implementation the input is returned unchanged.
]
if <ast.BoolOp object at 0x7da1b212d240> begin[:]
return[tuple[[<ast.Name object at 0x7da1b212c6a0>, <ast.Name object at 0x7da1b212d1e0>]]]
variable[ret] assign[=] call[call[name[__salt__]][constant[pkg.resolve_capabilities]], parameter[name[pkgs]]]
return[tuple[[<ast.Name object at 0x7da1b212ca30>, <ast.Constant object at 0x7da1b212ea70>]]]
|
keyword[def] identifier[_resolve_capabilities] ( identifier[pkgs] , identifier[refresh] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[pkgs] keyword[or] literal[string] keyword[not] keyword[in] identifier[__salt__] :
keyword[return] identifier[pkgs] , identifier[refresh]
identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[pkgs] , identifier[refresh] = identifier[refresh] ,** identifier[kwargs] )
keyword[return] identifier[ret] , keyword[False]
|
def _resolve_capabilities(pkgs, refresh=False, **kwargs):
"""
Resolve capabilities in ``pkgs`` and exchange them with real package
names, when the result is distinct.
This feature can be turned on while setting the paramter
``resolve_capabilities`` to True.
Return the input dictionary with replaced capability names and as
second return value a bool which say if a refresh need to be run.
In case of ``resolve_capabilities`` is False (disabled) or not
supported by the implementation the input is returned unchanged.
"""
if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
return (pkgs, refresh) # depends on [control=['if'], data=[]]
ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
return (ret, False)
|
def predict_topk(self, dataset, output_type='probability', k=3, output_frequency='per_row'):
    """
    Return the top-k class predictions for ``dataset``.

    The result is an SFrame whose rows pair each input row (or prediction
    window) with its ``k`` most likely classes, annotated with either a
    probability or a rank column depending on ``output_type``.

    Parameters
    ----------
    dataset : SFrame
        New observations.  Must contain the feature and session-id columns
        used at training time; a target column is not required and extra
        columns are ignored.
    output_type : {'probability', 'rank'}, optional
        Per-class annotation to emit: the class probability, or its rank.
    k : int, optional
        Number of classes returned per example.
    output_frequency : {'per_row', 'per_window'}, optional
        'per_row' repeats each prediction ``prediction_window`` times;
        'per_window' emits one prediction per window per session.

    Returns
    -------
    out : SFrame
        Model predictions, one row per (example, top-k class) pair.

    See Also
    --------
    predict, classify, evaluate
    """
    _tkutl._check_categorical_option_type('output_type', output_type, ['probability', 'rank'])
    class_map = self._id_target_map
    raw_preds = self.predict(dataset,
                             output_type='probability_vector',
                             output_frequency=output_frequency)
    if output_frequency == 'per_row':
        prob_vectors = raw_preds
    elif output_frequency == 'per_window':
        prob_vectors = raw_preds['probability_vector']

    # Turn one probability vector into its top-k class records; argsort is
    # ascending, so take the tail and walk it backwards for descending order.
    def _top_entries(vec):
        top_indices = reversed(_np.argsort(vec)[-k:])
        if output_type == 'rank':
            return [{'class': class_map[idx], 'rank': idx} for idx in top_indices]
        return [{'class': class_map[idx], 'probability': vec[idx]} for idx in top_indices]

    prob_vectors = prob_vectors.apply(_top_entries)
    if output_frequency == 'per_row':
        result = _SFrame({'probs': prob_vectors})
        result = result.add_row_number(column_name='row_id')
    elif output_frequency == 'per_window':
        result = _SFrame({
            'probs': prob_vectors,
            self.session_id: raw_preds[self.session_id],
            'prediction_id': raw_preds['prediction_id'],
        })
    # Explode the per-example list into one row per class, then flatten the
    # dicts into top-level 'class' plus 'probability'/'rank' columns.
    result = result.stack('probs', new_column_name='probs')
    result = result.unpack('probs', column_name_prefix='')
    return result
|
def function[predict_topk, parameter[self, dataset, output_type, k, output_frequency]]:
constant[
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `prediction_id`,
`class`, and `probability`, or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features and session id used for model training, but
does not require a target column. Additional columns are ignored.
output_type : {'probability', 'rank'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_row': Each prediction is returned ``prediction_window`` times.
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+---------------+-------+-------------------+
| row_id | class | probability |
+---------------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+---------------+-------+-------------------+
]
call[name[_tkutl]._check_categorical_option_type, parameter[constant[output_type], name[output_type], list[[<ast.Constant object at 0x7da1b200a620>, <ast.Constant object at 0x7da1b200a650>]]]]
variable[id_target_map] assign[=] name[self]._id_target_map
variable[preds] assign[=] call[name[self].predict, parameter[name[dataset]]]
if compare[name[output_frequency] equal[==] constant[per_row]] begin[:]
variable[probs] assign[=] name[preds]
if compare[name[output_type] equal[==] constant[rank]] begin[:]
variable[probs] assign[=] call[name[probs].apply, parameter[<ast.Lambda object at 0x7da1b20095d0>]]
if compare[name[output_frequency] equal[==] constant[per_row]] begin[:]
variable[output] assign[=] call[name[_SFrame], parameter[dictionary[[<ast.Constant object at 0x7da1b2008e20>], [<ast.Name object at 0x7da1b2008cd0>]]]]
variable[output] assign[=] call[name[output].add_row_number, parameter[]]
variable[output] assign[=] call[name[output].stack, parameter[constant[probs]]]
variable[output] assign[=] call[name[output].unpack, parameter[constant[probs]]]
return[name[output]]
|
keyword[def] identifier[predict_topk] ( identifier[self] , identifier[dataset] , identifier[output_type] = literal[string] , identifier[k] = literal[int] , identifier[output_frequency] = literal[string] ):
literal[string]
identifier[_tkutl] . identifier[_check_categorical_option_type] ( literal[string] , identifier[output_type] ,[ literal[string] , literal[string] ])
identifier[id_target_map] = identifier[self] . identifier[_id_target_map]
identifier[preds] = identifier[self] . identifier[predict] (
identifier[dataset] , identifier[output_type] = literal[string] , identifier[output_frequency] = identifier[output_frequency] )
keyword[if] identifier[output_frequency] == literal[string] :
identifier[probs] = identifier[preds]
keyword[elif] identifier[output_frequency] == literal[string] :
identifier[probs] = identifier[preds] [ literal[string] ]
keyword[if] identifier[output_type] == literal[string] :
identifier[probs] = identifier[probs] . identifier[apply] ( keyword[lambda] identifier[p] :[
{ literal[string] : identifier[id_target_map] [ identifier[i] ],
literal[string] : identifier[i] }
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[_np] . identifier[argsort] ( identifier[p] )[- identifier[k] :])]
)
keyword[elif] identifier[output_type] == literal[string] :
identifier[probs] = identifier[probs] . identifier[apply] ( keyword[lambda] identifier[p] :[
{ literal[string] : identifier[id_target_map] [ identifier[i] ],
literal[string] : identifier[p] [ identifier[i] ]}
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[_np] . identifier[argsort] ( identifier[p] )[- identifier[k] :])]
)
keyword[if] identifier[output_frequency] == literal[string] :
identifier[output] = identifier[_SFrame] ({ literal[string] : identifier[probs] })
identifier[output] = identifier[output] . identifier[add_row_number] ( identifier[column_name] = literal[string] )
keyword[elif] identifier[output_frequency] == literal[string] :
identifier[output] = identifier[_SFrame] ({
literal[string] : identifier[probs] ,
identifier[self] . identifier[session_id] : identifier[preds] [ identifier[self] . identifier[session_id] ],
literal[string] : identifier[preds] [ literal[string] ]
})
identifier[output] = identifier[output] . identifier[stack] ( literal[string] , identifier[new_column_name] = literal[string] )
identifier[output] = identifier[output] . identifier[unpack] ( literal[string] , identifier[column_name_prefix] = literal[string] )
keyword[return] identifier[output]
|
def predict_topk(self, dataset, output_type='probability', k=3, output_frequency='per_row'):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `prediction_id`,
`class`, and `probability`, or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features and session id used for model training, but
does not require a target column. Additional columns are ignored.
output_type : {'probability', 'rank'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_row': Each prediction is returned ``prediction_window`` times.
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+---------------+-------+-------------------+
| row_id | class | probability |
+---------------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+---------------+-------+-------------------+
"""
_tkutl._check_categorical_option_type('output_type', output_type, ['probability', 'rank'])
id_target_map = self._id_target_map
preds = self.predict(dataset, output_type='probability_vector', output_frequency=output_frequency)
if output_frequency == 'per_row':
probs = preds # depends on [control=['if'], data=[]]
elif output_frequency == 'per_window':
probs = preds['probability_vector'] # depends on [control=['if'], data=[]]
if output_type == 'rank':
probs = probs.apply(lambda p: [{'class': id_target_map[i], 'rank': i} for i in reversed(_np.argsort(p)[-k:])]) # depends on [control=['if'], data=[]]
elif output_type == 'probability':
probs = probs.apply(lambda p: [{'class': id_target_map[i], 'probability': p[i]} for i in reversed(_np.argsort(p)[-k:])]) # depends on [control=['if'], data=[]]
if output_frequency == 'per_row':
output = _SFrame({'probs': probs})
output = output.add_row_number(column_name='row_id') # depends on [control=['if'], data=[]]
elif output_frequency == 'per_window':
output = _SFrame({'probs': probs, self.session_id: preds[self.session_id], 'prediction_id': preds['prediction_id']}) # depends on [control=['if'], data=[]]
output = output.stack('probs', new_column_name='probs')
output = output.unpack('probs', column_name_prefix='')
return output
|
def set_camera_enabled(self, camera_id, is_enabled):
    """Enable or disable an Arlo camera via the 'privacy' resource.

    :param camera_id: identifier of the camera to toggle
    :param is_enabled: True to turn the camera on, False to turn it off
    """
    request = {
        'action': 'set',
        'resource': 'privacy',
        'camera_id': camera_id,
        'mode': is_enabled,
        'publish_response': True,
    }
    self.publish(**request)
    # Refresh local state so it reflects the new privacy mode.
    self.update()
|
def function[set_camera_enabled, parameter[self, camera_id, is_enabled]]:
constant[Turn Arlo camera On/Off.
:param mode: True, False
]
call[name[self].publish, parameter[]]
call[name[self].update, parameter[]]
|
keyword[def] identifier[set_camera_enabled] ( identifier[self] , identifier[camera_id] , identifier[is_enabled] ):
literal[string]
identifier[self] . identifier[publish] (
identifier[action] = literal[string] ,
identifier[resource] = literal[string] ,
identifier[camera_id] = identifier[camera_id] ,
identifier[mode] = identifier[is_enabled] ,
identifier[publish_response] = keyword[True] )
identifier[self] . identifier[update] ()
|
def set_camera_enabled(self, camera_id, is_enabled):
"""Turn Arlo camera On/Off.
:param mode: True, False
"""
self.publish(action='set', resource='privacy', camera_id=camera_id, mode=is_enabled, publish_response=True)
self.update()
|
def editorValue(self, editor):
    """
    Extracts the current value from the given editor widget.

    :param editor | <QWidget> || None
    :return <variant> value, or None when extraction fails
    """
    extracted, ok = projexui.widgetValue(editor)
    # A failed extraction is reported as None rather than a partial value.
    return extracted if ok else None
|
def function[editorValue, parameter[self, editor]]:
constant[
Returns the value from the editor for this widget.
:param editor | <QWidget> || None
:return <variant> value
]
<ast.Tuple object at 0x7da204620e80> assign[=] call[name[projexui].widgetValue, parameter[name[editor]]]
if <ast.UnaryOp object at 0x7da2046215d0> begin[:]
return[constant[None]]
return[name[value]]
|
keyword[def] identifier[editorValue] ( identifier[self] , identifier[editor] ):
literal[string]
identifier[value] , identifier[success] = identifier[projexui] . identifier[widgetValue] ( identifier[editor] )
keyword[if] keyword[not] identifier[success] :
keyword[return] keyword[None]
keyword[return] identifier[value]
|
def editorValue(self, editor):
"""
Returns the value from the editor for this widget.
:param editor | <QWidget> || None
:return <variant> value
"""
(value, success) = projexui.widgetValue(editor)
if not success:
return None # depends on [control=['if'], data=[]]
return value
|
def fehalf(self):
    """get the KL parcov scaling matrix attribute. Create the attribute if
    it has not yet been created

    Returns
    -------
    fehalf : pyemu.Matrix
    """
    # Use an identity check against the None sentinel: the cached value is a
    # Matrix-like object, and ``!=`` on such objects may be overloaded to
    # return something other than a plain bool (e.g. elementwise results).
    if self.__fehalf is not None:
        return self.__fehalf
    self.log("fehalf")
    # Scale the left singular vectors of parcov by the square roots of the
    # singular values (the "F e^(1/2)" Karhunen-Loeve factor).
    self.__fehalf = self.parcov.u * (self.parcov.s ** (0.5))
    self.log("fehalf")
    return self.__fehalf
|
def function[fehalf, parameter[self]]:
constant[get the KL parcov scaling matrix attribute. Create the attribute if
it has not yet been created
Returns
-------
fehalf : pyemu.Matrix
]
if compare[name[self].__fehalf not_equal[!=] constant[None]] begin[:]
return[name[self].__fehalf]
call[name[self].log, parameter[constant[fehalf]]]
name[self].__fehalf assign[=] binary_operation[name[self].parcov.u * binary_operation[name[self].parcov.s ** constant[0.5]]]
call[name[self].log, parameter[constant[fehalf]]]
return[name[self].__fehalf]
|
keyword[def] identifier[fehalf] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__fehalf] != keyword[None] :
keyword[return] identifier[self] . identifier[__fehalf]
identifier[self] . identifier[log] ( literal[string] )
identifier[self] . identifier[__fehalf] = identifier[self] . identifier[parcov] . identifier[u] *( identifier[self] . identifier[parcov] . identifier[s] **( literal[int] ))
identifier[self] . identifier[log] ( literal[string] )
keyword[return] identifier[self] . identifier[__fehalf]
|
def fehalf(self):
"""get the KL parcov scaling matrix attribute. Create the attribute if
it has not yet been created
Returns
-------
fehalf : pyemu.Matrix
"""
if self.__fehalf != None:
return self.__fehalf # depends on [control=['if'], data=[]]
self.log('fehalf')
self.__fehalf = self.parcov.u * self.parcov.s ** 0.5
self.log('fehalf')
return self.__fehalf
|
def p_file_lic_conc(self, f_term, predicate):
    """Sets file licenses concluded."""
    try:
        # Walk every license object attached to this file term.
        for _subj, _pred, lic_node in self.graph.triples((f_term, predicate, None)):
            is_conjunctive = (
                (lic_node, RDF.type, self.spdx_namespace['ConjunctiveLicenseSet'])
                in self.graph)
            is_disjunctive = (
                (lic_node, RDF.type, self.spdx_namespace['DisjunctiveLicenseSet'])
                in self.graph)
            if is_conjunctive:
                self.builder.set_concluded_license(
                    self.doc, self.handle_conjunctive_list(lic_node))
            elif is_disjunctive:
                self.builder.set_concluded_license(
                    self.doc, self.handle_disjunctive_list(lic_node))
            else:
                # Single license: report a value error instead of aborting
                # when the node cannot be interpreted as a license.
                try:
                    self.builder.set_concluded_license(
                        self.doc, self.handle_lics(lic_node))
                except SPDXValueError:
                    self.value_error('FILE_SINGLE_LICS', lic_node)
    except CardinalityError:
        self.more_than_one_error('file {0}'.format(predicate))
|
def function[p_file_lic_conc, parameter[self, f_term, predicate]]:
constant[Sets file licenses concluded.]
<ast.Try object at 0x7da1b015b610>
|
keyword[def] identifier[p_file_lic_conc] ( identifier[self] , identifier[f_term] , identifier[predicate] ):
literal[string]
keyword[try] :
keyword[for] identifier[_] , identifier[_] , identifier[licenses] keyword[in] identifier[self] . identifier[graph] . identifier[triples] (( identifier[f_term] , identifier[predicate] , keyword[None] )):
keyword[if] ( identifier[licenses] , identifier[RDF] . identifier[type] , identifier[self] . identifier[spdx_namespace] [ literal[string] ]) keyword[in] identifier[self] . identifier[graph] :
identifier[lics] = identifier[self] . identifier[handle_conjunctive_list] ( identifier[licenses] )
identifier[self] . identifier[builder] . identifier[set_concluded_license] ( identifier[self] . identifier[doc] , identifier[lics] )
keyword[elif] ( identifier[licenses] , identifier[RDF] . identifier[type] , identifier[self] . identifier[spdx_namespace] [ literal[string] ]) keyword[in] identifier[self] . identifier[graph] :
identifier[lics] = identifier[self] . identifier[handle_disjunctive_list] ( identifier[licenses] )
identifier[self] . identifier[builder] . identifier[set_concluded_license] ( identifier[self] . identifier[doc] , identifier[lics] )
keyword[else] :
keyword[try] :
identifier[lics] = identifier[self] . identifier[handle_lics] ( identifier[licenses] )
identifier[self] . identifier[builder] . identifier[set_concluded_license] ( identifier[self] . identifier[doc] , identifier[lics] )
keyword[except] identifier[SPDXValueError] :
identifier[self] . identifier[value_error] ( literal[string] , identifier[licenses] )
keyword[except] identifier[CardinalityError] :
identifier[self] . identifier[more_than_one_error] ( literal[string] . identifier[format] ( identifier[predicate] ))
|
def p_file_lic_conc(self, f_term, predicate):
"""Sets file licenses concluded."""
try:
for (_, _, licenses) in self.graph.triples((f_term, predicate, None)):
if (licenses, RDF.type, self.spdx_namespace['ConjunctiveLicenseSet']) in self.graph:
lics = self.handle_conjunctive_list(licenses)
self.builder.set_concluded_license(self.doc, lics) # depends on [control=['if'], data=[]]
elif (licenses, RDF.type, self.spdx_namespace['DisjunctiveLicenseSet']) in self.graph:
lics = self.handle_disjunctive_list(licenses)
self.builder.set_concluded_license(self.doc, lics) # depends on [control=['if'], data=[]]
else:
try:
lics = self.handle_lics(licenses)
self.builder.set_concluded_license(self.doc, lics) # depends on [control=['try'], data=[]]
except SPDXValueError:
self.value_error('FILE_SINGLE_LICS', licenses) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except CardinalityError:
self.more_than_one_error('file {0}'.format(predicate)) # depends on [control=['except'], data=[]]
|
def js(request):
    """Returns the javascript needed to run persona"""
    userid = authenticated_userid(request)
    # Markup's % operator HTML-escapes the interpolated user id.
    if userid:
        user = markupsafe.Markup("'%s'") % userid
    else:
        user = "null"
    redirect_param = request.registry['persona.redirect_url_parameter']
    came_from = '{0}{1}'.format(
        request.host_url,
        request.GET.get(redirect_param, request.path_qs))
    data = {
        'user': user,
        'login': request.route_path(request.registry['persona.login_route']),
        'logout': request.route_path(request.registry['persona.logout_route']),
        'csrf_token': request.session.get_csrf_token(),
        'came_from': came_from,
        'request_params': markupsafe.Markup(request.registry['persona.request_params']),
    }
    raw = pkg_resources.resource_string('pyramid_persona', 'templates/persona.js')
    template = markupsafe.Markup(raw.decode())
    return template % data
|
def function[js, parameter[request]]:
constant[Returns the javascript needed to run persona]
variable[userid] assign[=] call[name[authenticated_userid], parameter[name[request]]]
variable[user] assign[=] <ast.IfExp object at 0x7da1b0a625f0>
variable[redirect_paramater] assign[=] call[name[request].registry][constant[persona.redirect_url_parameter]]
variable[came_from] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0a63580>, <ast.Call object at 0x7da1b0a637c0>]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a60700>, <ast.Constant object at 0x7da1b0a62d10>, <ast.Constant object at 0x7da1b0a626e0>, <ast.Constant object at 0x7da1b0a609a0>, <ast.Constant object at 0x7da1b0a60400>, <ast.Constant object at 0x7da1b0a60370>], [<ast.Name object at 0x7da1b0a63bb0>, <ast.Call object at 0x7da1b0a61a80>, <ast.Call object at 0x7da1b0a63730>, <ast.Call object at 0x7da1b0a62050>, <ast.Name object at 0x7da1b0a62b00>, <ast.Call object at 0x7da1b0a63970>]]
variable[template] assign[=] call[name[markupsafe].Markup, parameter[call[call[name[pkg_resources].resource_string, parameter[constant[pyramid_persona], constant[templates/persona.js]]].decode, parameter[]]]]
return[binary_operation[name[template] <ast.Mod object at 0x7da2590d6920> name[data]]]
|
keyword[def] identifier[js] ( identifier[request] ):
literal[string]
identifier[userid] = identifier[authenticated_userid] ( identifier[request] )
identifier[user] = identifier[markupsafe] . identifier[Markup] ( literal[string] )% identifier[userid] keyword[if] identifier[userid] keyword[else] literal[string]
identifier[redirect_paramater] = identifier[request] . identifier[registry] [ literal[string] ]
identifier[came_from] = literal[string] %( identifier[request] . identifier[host_url] ,
identifier[request] . identifier[GET] . identifier[get] ( identifier[redirect_paramater] , identifier[request] . identifier[path_qs] ))
identifier[data] ={
literal[string] : identifier[user] ,
literal[string] : identifier[request] . identifier[route_path] ( identifier[request] . identifier[registry] [ literal[string] ]),
literal[string] : identifier[request] . identifier[route_path] ( identifier[request] . identifier[registry] [ literal[string] ]),
literal[string] : identifier[request] . identifier[session] . identifier[get_csrf_token] (),
literal[string] : identifier[came_from] ,
literal[string] : identifier[markupsafe] . identifier[Markup] ( identifier[request] . identifier[registry] [ literal[string] ]),
}
identifier[template] = identifier[markupsafe] . identifier[Markup] ( identifier[pkg_resources] . identifier[resource_string] ( literal[string] , literal[string] ). identifier[decode] ())
keyword[return] identifier[template] % identifier[data]
|
def js(request):
"""Returns the javascript needed to run persona"""
userid = authenticated_userid(request)
user = markupsafe.Markup("'%s'") % userid if userid else 'null'
redirect_paramater = request.registry['persona.redirect_url_parameter']
came_from = '%s%s' % (request.host_url, request.GET.get(redirect_paramater, request.path_qs))
data = {'user': user, 'login': request.route_path(request.registry['persona.login_route']), 'logout': request.route_path(request.registry['persona.logout_route']), 'csrf_token': request.session.get_csrf_token(), 'came_from': came_from, 'request_params': markupsafe.Markup(request.registry['persona.request_params'])}
template = markupsafe.Markup(pkg_resources.resource_string('pyramid_persona', 'templates/persona.js').decode())
return template % data
|
def start_instance(
        self,
        key_name,
        public_key_path,
        private_key_path,
        security_group,
        flavor,
        image,
        image_userdata,
        location=None,
        base_name=None,
        username=None,
        node_name=None,
        host_name=None,
        use_public_ips=None,
        wait_timeout=None,
        use_short_vm_names=None,
        n_cloud_services=None,
        n_storage_accounts=None,
        **kwargs):
    """Starts a new instance on the cloud using the given properties.
    Multiple instances might be started in different threads at the same
    time. The implementation should handle any problems regarding this
    itself.

    The first call also performs one-time cluster setup (config and global
    Azure requirements) under ``self._resource_lock``; every subsequent
    call creates one VM and starts it.  On any start failure the
    ``_start_failed`` flag is set so later calls fail fast.

    :param key_name: name of the SSH keypair to install on the VM
    :param public_key_path: local path of the SSH public key
    :param private_key_path: local path of the SSH private key
    :param security_group: security group for the new VM
    :param flavor: VM size/flavor identifier
    :param image: OS image to boot the VM from
    :param image_userdata: user-data passed to the image at boot
    :param node_name: elasticluster node name (used in error messages)
    :param host_name: host name assigned to the VM
    :param n_cloud_services: number of Azure cloud services to spread VMs
        across -- presumably; confirm against the config ``setup`` impl
    :param n_storage_accounts: number of storage accounts to spread VMs
        across -- presumably; confirm against the config ``setup`` impl
    :return: str - instance id of the started instance
    """
    if self._start_failed:
        raise Exception('start_instance for node %s: failing due to'
                        ' previous errors.' % node_name)
    index = None
    with self._resource_lock:
        # it'd be nice if elasticluster called something like
        # init_cluster() with all the args that will be the
        # same for every node created. But since it doesn't, handle that on
        # first start_instance call.
        if not self._cluster_prep_done:
            self._times['CLUSTER_START'] = time.time()
            self._config.setup(
                key_name,
                public_key_path,
                private_key_path,
                security_group,
                location,
                base_name=base_name,
                username=username,
                use_public_ips=use_public_ips,
                wait_timeout=wait_timeout,
                use_short_vm_names=use_short_vm_names,
                n_cloud_services=n_cloud_services,
                n_storage_accounts=n_storage_accounts,
                **kwargs)
            # we know we're starting the first node, so create global
            # requirements now
            self._create_global_reqs()
            if self._start_failed:
                return None
            # this will allow vms to be created
            self._times['SETUP_DONE'] = time.time()
            self._cluster_prep_done = True
        # absolute node index in cluster (0..n-1) determines what
        # subscription, cloud service, storage
        # account, etc. this VM will use. Create the vm and add it to
        # its cloud service, then try to start it.
        index = self._n_instances
        v_m = AzureVM(
            self._config,
            index,
            flavor=flavor,
            image=image,
            node_name=node_name,
            host_name=host_name,
            image_userdata=image_userdata)
        v_m._cloud_service._instances[v_m._qualified_name] = v_m
        try:
            v_m._cloud_service._start_vm(v_m)
        except Exception:
            # One failed start poisons the whole cluster launch: record it
            # and make subsequent start_instance calls raise immediately.
            log.error(traceback.format_exc())
            log.error("setting start_failed flag. Will not "
                      "try to start further nodes.")
            self._start_failed = True
            return None
        log.debug('started instance %s', v_m._qualified_name)
        if index == self._config._n_vms_requested - 1:
            # all nodes started -- compute and log the timing breakdown
            self._times['NODES_STARTED'] = time.time()
            self._times['SETUP_ELAPSED'] = self._times['SETUP_DONE'] - \
                self._times['CLUSTER_START']
            self._times['NODE_START_ELAPSED'] = self._times['NODES_STARTED']\
                - self._times['SETUP_DONE']
            self._times['CLUSTER_START_ELAPSED'] = \
                self._times['SETUP_ELAPSED'] + \
                self._times['NODE_START_ELAPSED']
            log.debug("setup time: %.1f sec", self._times['SETUP_ELAPSED'])
            log.debug("node start time: %.1f sec (%.1f sec per vm)",
                      self._times['NODE_START_ELAPSED'],
                      self._times['NODE_START_ELAPSED'] /
                      self._config._n_vms_requested)
            log.debug("total cluster start time: %.1f sec (%.1f sec per vm)",
                      self._times['CLUSTER_START_ELAPSED'],
                      self._times['CLUSTER_START_ELAPSED'] /
                      self._config._n_vms_requested)
            # pause here to try to address the fact that Ansible setup fails
            # more often on the first try than subsequent tries
            time.sleep(_retry_sleep())
        self._save_or_update()  # store our state
        return v_m._qualified_name
|
def function[start_instance, parameter[self, key_name, public_key_path, private_key_path, security_group, flavor, image, image_userdata, location, base_name, username, node_name, host_name, use_public_ips, wait_timeout, use_short_vm_names, n_cloud_services, n_storage_accounts]]:
constant[Starts a new instance on the cloud using the given properties.
Multiple instances might be started in different threads at the same
time. The implementation should handle any problems regarding this
itself.
:return: str - instance id of the started instance
]
if name[self]._start_failed begin[:]
<ast.Raise object at 0x7da2054a7a90>
variable[index] assign[=] constant[None]
with name[self]._resource_lock begin[:]
if <ast.UnaryOp object at 0x7da2054a6c80> begin[:]
call[name[self]._times][constant[CLUSTER_START]] assign[=] call[name[time].time, parameter[]]
call[name[self]._config.setup, parameter[name[key_name], name[public_key_path], name[private_key_path], name[security_group], name[location]]]
call[name[self]._create_global_reqs, parameter[]]
if name[self]._start_failed begin[:]
return[constant[None]]
call[name[self]._times][constant[SETUP_DONE]] assign[=] call[name[time].time, parameter[]]
name[self]._cluster_prep_done assign[=] constant[True]
variable[index] assign[=] name[self]._n_instances
variable[v_m] assign[=] call[name[AzureVM], parameter[name[self]._config, name[index]]]
call[name[v_m]._cloud_service._instances][name[v_m]._qualified_name] assign[=] name[v_m]
<ast.Try object at 0x7da2054a7a60>
call[name[log].debug, parameter[constant[started instance %s], name[v_m]._qualified_name]]
if compare[name[index] equal[==] binary_operation[name[self]._config._n_vms_requested - constant[1]]] begin[:]
call[name[self]._times][constant[NODES_STARTED]] assign[=] call[name[time].time, parameter[]]
call[name[self]._times][constant[SETUP_ELAPSED]] assign[=] binary_operation[call[name[self]._times][constant[SETUP_DONE]] - call[name[self]._times][constant[CLUSTER_START]]]
call[name[self]._times][constant[NODE_START_ELAPSED]] assign[=] binary_operation[call[name[self]._times][constant[NODES_STARTED]] - call[name[self]._times][constant[SETUP_DONE]]]
call[name[self]._times][constant[CLUSTER_START_ELAPSED]] assign[=] binary_operation[call[name[self]._times][constant[SETUP_ELAPSED]] + call[name[self]._times][constant[NODE_START_ELAPSED]]]
call[name[log].debug, parameter[constant[setup time: %.1f sec], call[name[self]._times][constant[SETUP_ELAPSED]]]]
call[name[log].debug, parameter[constant[node start time: %.1f sec (%.1f sec per vm)], call[name[self]._times][constant[NODE_START_ELAPSED]], binary_operation[call[name[self]._times][constant[NODE_START_ELAPSED]] / name[self]._config._n_vms_requested]]]
call[name[log].debug, parameter[constant[total cluster start time: %.1f sec (%.1f sec per vm)], call[name[self]._times][constant[CLUSTER_START_ELAPSED]], binary_operation[call[name[self]._times][constant[CLUSTER_START_ELAPSED]] / name[self]._config._n_vms_requested]]]
call[name[time].sleep, parameter[call[name[_retry_sleep], parameter[]]]]
call[name[self]._save_or_update, parameter[]]
return[name[v_m]._qualified_name]
|
keyword[def] identifier[start_instance] (
identifier[self] ,
identifier[key_name] ,
identifier[public_key_path] ,
identifier[private_key_path] ,
identifier[security_group] ,
identifier[flavor] ,
identifier[image] ,
identifier[image_userdata] ,
identifier[location] = keyword[None] ,
identifier[base_name] = keyword[None] ,
identifier[username] = keyword[None] ,
identifier[node_name] = keyword[None] ,
identifier[host_name] = keyword[None] ,
identifier[use_public_ips] = keyword[None] ,
identifier[wait_timeout] = keyword[None] ,
identifier[use_short_vm_names] = keyword[None] ,
identifier[n_cloud_services] = keyword[None] ,
identifier[n_storage_accounts] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[_start_failed] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] % identifier[node_name] )
identifier[index] = keyword[None]
keyword[with] identifier[self] . identifier[_resource_lock] :
keyword[if] keyword[not] identifier[self] . identifier[_cluster_prep_done] :
identifier[self] . identifier[_times] [ literal[string] ]= identifier[time] . identifier[time] ()
identifier[self] . identifier[_config] . identifier[setup] (
identifier[key_name] ,
identifier[public_key_path] ,
identifier[private_key_path] ,
identifier[security_group] ,
identifier[location] ,
identifier[base_name] = identifier[base_name] ,
identifier[username] = identifier[username] ,
identifier[use_public_ips] = identifier[use_public_ips] ,
identifier[wait_timeout] = identifier[wait_timeout] ,
identifier[use_short_vm_names] = identifier[use_short_vm_names] ,
identifier[n_cloud_services] = identifier[n_cloud_services] ,
identifier[n_storage_accounts] = identifier[n_storage_accounts] ,
** identifier[kwargs] )
identifier[self] . identifier[_create_global_reqs] ()
keyword[if] identifier[self] . identifier[_start_failed] :
keyword[return] keyword[None]
identifier[self] . identifier[_times] [ literal[string] ]= identifier[time] . identifier[time] ()
identifier[self] . identifier[_cluster_prep_done] = keyword[True]
identifier[index] = identifier[self] . identifier[_n_instances]
identifier[v_m] = identifier[AzureVM] (
identifier[self] . identifier[_config] ,
identifier[index] ,
identifier[flavor] = identifier[flavor] ,
identifier[image] = identifier[image] ,
identifier[node_name] = identifier[node_name] ,
identifier[host_name] = identifier[host_name] ,
identifier[image_userdata] = identifier[image_userdata] )
identifier[v_m] . identifier[_cloud_service] . identifier[_instances] [ identifier[v_m] . identifier[_qualified_name] ]= identifier[v_m]
keyword[try] :
identifier[v_m] . identifier[_cloud_service] . identifier[_start_vm] ( identifier[v_m] )
keyword[except] identifier[Exception] :
identifier[log] . identifier[error] ( identifier[traceback] . identifier[format_exc] ())
identifier[log] . identifier[error] ( literal[string]
literal[string] )
identifier[self] . identifier[_start_failed] = keyword[True]
keyword[return] keyword[None]
identifier[log] . identifier[debug] ( literal[string] , identifier[v_m] . identifier[_qualified_name] )
keyword[if] identifier[index] == identifier[self] . identifier[_config] . identifier[_n_vms_requested] - literal[int] :
identifier[self] . identifier[_times] [ literal[string] ]= identifier[time] . identifier[time] ()
identifier[self] . identifier[_times] [ literal[string] ]= identifier[self] . identifier[_times] [ literal[string] ]- identifier[self] . identifier[_times] [ literal[string] ]
identifier[self] . identifier[_times] [ literal[string] ]= identifier[self] . identifier[_times] [ literal[string] ]- identifier[self] . identifier[_times] [ literal[string] ]
identifier[self] . identifier[_times] [ literal[string] ]= identifier[self] . identifier[_times] [ literal[string] ]+ identifier[self] . identifier[_times] [ literal[string] ]
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[_times] [ literal[string] ])
identifier[log] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[_times] [ literal[string] ],
identifier[self] . identifier[_times] [ literal[string] ]/
identifier[self] . identifier[_config] . identifier[_n_vms_requested] )
identifier[log] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[_times] [ literal[string] ],
identifier[self] . identifier[_times] [ literal[string] ]/
identifier[self] . identifier[_config] . identifier[_n_vms_requested] )
identifier[time] . identifier[sleep] ( identifier[_retry_sleep] ())
identifier[self] . identifier[_save_or_update] ()
keyword[return] identifier[v_m] . identifier[_qualified_name]
|
def start_instance(self, key_name, public_key_path, private_key_path, security_group, flavor, image, image_userdata, location=None, base_name=None, username=None, node_name=None, host_name=None, use_public_ips=None, wait_timeout=None, use_short_vm_names=None, n_cloud_services=None, n_storage_accounts=None, **kwargs):
"""Starts a new instance on the cloud using the given properties.
Multiple instances might be started in different threads at the same
time. The implementation should handle any problems regarding this
itself.
:return: str - instance id of the started instance
"""
if self._start_failed:
raise Exception('start_instance for node %s: failing due to previous errors.' % node_name) # depends on [control=['if'], data=[]]
index = None
with self._resource_lock:
# it'd be nice if elasticluster called something like
# init_cluster() with all the args that will be the
# same for every node created. But since it doesn't, handle that on
# first start_instance call.
if not self._cluster_prep_done:
self._times['CLUSTER_START'] = time.time()
self._config.setup(key_name, public_key_path, private_key_path, security_group, location, base_name=base_name, username=username, use_public_ips=use_public_ips, wait_timeout=wait_timeout, use_short_vm_names=use_short_vm_names, n_cloud_services=n_cloud_services, n_storage_accounts=n_storage_accounts, **kwargs)
# we know we're starting the first node, so create global
# requirements now
self._create_global_reqs()
if self._start_failed:
return None # depends on [control=['if'], data=[]]
# this will allow vms to be created
self._times['SETUP_DONE'] = time.time()
self._cluster_prep_done = True # depends on [control=['if'], data=[]]
# absolute node index in cluster (0..n-1) determines what
# subscription, cloud service, storage
# account, etc. this VM will use. Create the vm and add it to
# its cloud service, then try to start it.
index = self._n_instances
v_m = AzureVM(self._config, index, flavor=flavor, image=image, node_name=node_name, host_name=host_name, image_userdata=image_userdata)
v_m._cloud_service._instances[v_m._qualified_name] = v_m # depends on [control=['with'], data=[]]
try:
v_m._cloud_service._start_vm(v_m) # depends on [control=['try'], data=[]]
except Exception:
log.error(traceback.format_exc())
log.error('setting start_failed flag. Will not try to start further nodes.')
self._start_failed = True
return None # depends on [control=['except'], data=[]]
log.debug('started instance %s', v_m._qualified_name)
if index == self._config._n_vms_requested - 1:
# all nodes started
self._times['NODES_STARTED'] = time.time()
self._times['SETUP_ELAPSED'] = self._times['SETUP_DONE'] - self._times['CLUSTER_START']
self._times['NODE_START_ELAPSED'] = self._times['NODES_STARTED'] - self._times['SETUP_DONE']
self._times['CLUSTER_START_ELAPSED'] = self._times['SETUP_ELAPSED'] + self._times['NODE_START_ELAPSED']
log.debug('setup time: %.1f sec', self._times['SETUP_ELAPSED'])
log.debug('node start time: %.1f sec (%.1f sec per vm)', self._times['NODE_START_ELAPSED'], self._times['NODE_START_ELAPSED'] / self._config._n_vms_requested)
log.debug('total cluster start time: %.1f sec (%.1f sec per vm)', self._times['CLUSTER_START_ELAPSED'], self._times['CLUSTER_START_ELAPSED'] / self._config._n_vms_requested)
# pause here to try to address the fact that Ansible setup fails
# more often on the first try than subsequent tries
time.sleep(_retry_sleep()) # depends on [control=['if'], data=[]]
self._save_or_update() # store our state
return v_m._qualified_name
|
def functions_shadowed(self):
'''
Return the list of functions shadowed
Returns:
list(core.Function)
'''
candidates = [c.functions_not_inherited for c in self.contract.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == self.full_name]
|
def function[functions_shadowed, parameter[self]]:
constant[
Return the list of functions shadowed
Returns:
list(core.Function)
]
variable[candidates] assign[=] <ast.ListComp object at 0x7da1b17d6e60>
variable[candidates] assign[=] <ast.ListComp object at 0x7da1b17d64a0>
return[<ast.ListComp object at 0x7da1b17d4580>]
|
keyword[def] identifier[functions_shadowed] ( identifier[self] ):
literal[string]
identifier[candidates] =[ identifier[c] . identifier[functions_not_inherited] keyword[for] identifier[c] keyword[in] identifier[self] . identifier[contract] . identifier[inheritance] ]
identifier[candidates] =[ identifier[candidate] keyword[for] identifier[sublist] keyword[in] identifier[candidates] keyword[for] identifier[candidate] keyword[in] identifier[sublist] ]
keyword[return] [ identifier[f] keyword[for] identifier[f] keyword[in] identifier[candidates] keyword[if] identifier[f] . identifier[full_name] == identifier[self] . identifier[full_name] ]
|
def functions_shadowed(self):
"""
Return the list of functions shadowed
Returns:
list(core.Function)
"""
candidates = [c.functions_not_inherited for c in self.contract.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == self.full_name]
|
def log_train(batch_id, batch_num, metric, step_loss, log_interval, epoch_id, learning_rate):
"""Generate and print out the log message for training.
"""
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
metric_val = [metric_val]
train_str = '[Epoch %d Batch %d/%d] loss=%.4f, lr=%.7f, metrics:' + \
','.join([i + ':%.4f' for i in metric_nm])
logging.info(train_str, epoch_id + 1, batch_id + 1, batch_num, \
step_loss / log_interval, \
learning_rate, \
*metric_val)
|
def function[log_train, parameter[batch_id, batch_num, metric, step_loss, log_interval, epoch_id, learning_rate]]:
constant[Generate and print out the log message for training.
]
<ast.Tuple object at 0x7da1b21688b0> assign[=] call[name[metric].get, parameter[]]
if <ast.UnaryOp object at 0x7da1b216a1d0> begin[:]
variable[metric_nm] assign[=] list[[<ast.Name object at 0x7da1b216ac50>]]
variable[metric_val] assign[=] list[[<ast.Name object at 0x7da1b2168eb0>]]
variable[train_str] assign[=] binary_operation[constant[[Epoch %d Batch %d/%d] loss=%.4f, lr=%.7f, metrics:] + call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b2169c30>]]]
call[name[logging].info, parameter[name[train_str], binary_operation[name[epoch_id] + constant[1]], binary_operation[name[batch_id] + constant[1]], name[batch_num], binary_operation[name[step_loss] / name[log_interval]], name[learning_rate], <ast.Starred object at 0x7da1b216a0b0>]]
|
keyword[def] identifier[log_train] ( identifier[batch_id] , identifier[batch_num] , identifier[metric] , identifier[step_loss] , identifier[log_interval] , identifier[epoch_id] , identifier[learning_rate] ):
literal[string]
identifier[metric_nm] , identifier[metric_val] = identifier[metric] . identifier[get] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[metric_nm] , identifier[list] ):
identifier[metric_nm] =[ identifier[metric_nm] ]
identifier[metric_val] =[ identifier[metric_val] ]
identifier[train_str] = literal[string] + literal[string] . identifier[join] ([ identifier[i] + literal[string] keyword[for] identifier[i] keyword[in] identifier[metric_nm] ])
identifier[logging] . identifier[info] ( identifier[train_str] , identifier[epoch_id] + literal[int] , identifier[batch_id] + literal[int] , identifier[batch_num] , identifier[step_loss] / identifier[log_interval] , identifier[learning_rate] ,* identifier[metric_val] )
|
def log_train(batch_id, batch_num, metric, step_loss, log_interval, epoch_id, learning_rate):
"""Generate and print out the log message for training.
"""
(metric_nm, metric_val) = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
metric_val = [metric_val] # depends on [control=['if'], data=[]]
train_str = '[Epoch %d Batch %d/%d] loss=%.4f, lr=%.7f, metrics:' + ','.join([i + ':%.4f' for i in metric_nm])
logging.info(train_str, epoch_id + 1, batch_id + 1, batch_num, step_loss / log_interval, learning_rate, *metric_val)
|
def gp_ptspec():
"""example for a 2D-panel plot etc."""
fenergies = ['19', '27', '39', '62', ]# '200']
nen = len(fenergies)
mee_keys = ['pi0', 'LMR', 'omega', 'phi', 'IMR', 'jpsi']
#mee_keys = ['LMR', ]
mee_dict = OrderedDict((k,'') for k in mee_keys)
yscale = { '200': '300', '62': '5000', '39': '50', '27': '0.3', '19': '0.001' }
inDir, outDir = getWorkDirs()
data, data_avpt, dpt_dict = {}, {}, {}
yvals, yvalsPt = [], []
scale = {
'19': 1.3410566491548412, '200': 1.0, '39': 1.2719203877292842,
'27': 1.350873678084769, '62': 1.2664666321635087
}
lmr_label = None
for filename in os.listdir(inDir):
# import data
file_url = os.path.join(inDir, filename)
filebase = os.path.splitext(filename)[0] # unique
energy, mee_name, mee_range, data_type = splitFileName(filebase)
if mee_name == 'LMR':
mee_range_split = map(float, mee_range.split('-'))
lmr_label = 'LMR: %g < M_{ee} < %g GeV/c^{2}' % (
mee_range_split[0], mee_range_split[1]
)
if energy == '200': continue
if mee_name not in mee_keys: continue
mee_dict[mee_name] = mee_range
data[filebase] = np.loadtxt(open(file_url, 'rb'))
if data_type == 'data':
#print data[filebase]
data[filebase] = data[filebase][:-1] # skip mT<0.4 point
if energy == '200': data[filebase][:,(1,3,4)] /= 0.5
# calculate average pT first
mask = (data[filebase][:,0] > 0.4) & (data[filebase][:,0] < 2.2)
avpt_data = data[filebase][mask]
pTs = avpt_data[:,0]
wghts = avpt_data[:,1]
probs = unp.uarray(avpt_data[:,1], avpt_data[:,3]) # dN/pT
probs /= umath.fsum(probs) # probabilities
avpt = umath.fsum(pTs*probs)
logging.info(('%s: {} %g' % (
filebase, np.average(pTs, weights = wghts)
)).format(avpt)) # TODO: syst. uncertainties
# save datapoint for average pT and append to yvalsPt for yaxis range
dp = [ float(getEnergy4Key(energy)), avpt.nominal_value, 0., avpt.std_dev, 0. ]
avpt_key = mee_name
if data_type == 'cocktail': avpt_key += '_c'
if data_type == 'medium': avpt_key += '_m'
if data_type == 'mediumMedOnly': avpt_key += '_mMed'
if data_type == 'mediumQgpOnly': avpt_key += '_mQgp'
if avpt_key in data_avpt: data_avpt[avpt_key].append(dp)
else: data_avpt[avpt_key] = [ dp ]
yvalsPt.append(avpt.nominal_value)
# now adjust data for panel plot and append to yvals
if data_type != 'data':
data[filebase][:,(1,3,4)] /= scale[energy]
data[filebase][:,(1,3,4)] *= float(yscale[energy])
if data_type == 'cocktail' or fnmatch(data_type, '*medium*'):
data[filebase][:,2:] = 0.
yvals += [v for v in data[filebase][:,1] if v > 0]
# prepare dict for panel plot
dpt_dict_key = getSubplotTitle(mee_name, mee_range)
if dpt_dict_key not in dpt_dict:
ndsets = nen*2
# TODO: currently only 19/39/62 medium avail. w/ med/qgp/tot for each
# July14: all energies available; TODO: fix dsidx
if mee_name == 'LMR': ndsets += 4*3
dpt_dict[dpt_dict_key] = [ [None]*ndsets, [None]*ndsets, [None]*ndsets ]
enidx = fenergies.index(energy)
dsidx = enidx
if fnmatch(data_type, '*medium*'):
# 19: 0-2, 27: 3-5, 39: 6-8, 62: 9-11
dsidx = (energy=='19')*0 + (energy=='27')*3 + (energy=='39')*6 + (energy=='62')*9
dsidx += (data_type=='mediumQgpOnly')*0 + (data_type=='mediumMedOnly')*1
dsidx += (data_type=='medium')*2
else:
dsidx += int(mee_name == 'LMR') * 4 * 3 # number of medium calc avail.
dsidx += int(data_type == 'data') * len(fenergies)
dpt_dict[dpt_dict_key][0][dsidx] = data[filebase] # data
if data_type == 'data': # properties
dpt_dict[dpt_dict_key][1][dsidx] = 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[enidx]
elif data_type == 'medium':
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt 1 lw 5 lc %s' % default_colors[enidx]
else:
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt %d lw 5 lc %s' % (
2+(data_type=='mediumMedOnly')+(data_type=='mediumQgpOnly')*2, default_colors[enidx]
)
dpt_dict[dpt_dict_key][2][dsidx] = ' '.join([ # legend titles
getEnergy4Key(energy), 'GeV', '{/Symbol \264} %g' % (
Decimal(yscale[energy])#.as_tuple().exponent
)
]) if data_type == 'data' else ''
# use mass range in dict key to sort dpt_dict with increasing mass
plot_key_order = dpt_dict.keys()
plot_key_order.sort(key=lambda x: float(x.split(':')[1].split('-')[0]))
# sort data_avpt by energy and apply x-shift for better visibility
for k in data_avpt: data_avpt[k].sort(key=lambda x: x[0])
energies = [ dp[0] for dp in data_avpt[mee_keys[0]] ]
energies.append(215.) # TODO: think of better upper limit
linsp = {}
for start,stop in zip(energies[:-1],energies[1:]):
linsp[start] = np.linspace(start, stop, num = 4*len(mee_keys))
for k in data_avpt:
key = k.split('_')[0]
for i in xrange(len(data_avpt[k])):
data_avpt[k][i][0] = linsp[energies[i]][mee_keys.index(key)]
# make panel plot
yMin, yMax = 0.5*min(yvals), 3*max(yvals)
make_panel(
dpt_dict = OrderedDict((k,dpt_dict[k]) for k in plot_key_order),
name = os.path.join(outDir, 'ptspec'),
ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
ylog = True, xr = [0, 2.2], yr = [1e-9, 1e4],
#lmargin = 0.12, bmargin = 0.10, tmargin = 1., rmargin = 1.,
key = ['bottom left', 'samplen 0.5', 'width -2', 'opaque'],
arrow_bar = 0.002, layout = '3x2', size = '8in,8in'
)
#make plot for LMR spectra only
#lmr_key = getSubplotTitle('LMR', '0.4-0.76')
#if energy == '200':
# lmr_key = getSubplotTitle('LMR', '0.3-0.76')
#pseudo_point = np.array([[-1,0,0,0,0]])
#model_titles = ['Cocktail + Model', 'Cocktail', 'in-Medium', 'QGP']
#model_props = [
# 'with lines lc %s lw 5 lt %d' % (default_colors[-2], i+1)
# for i in xrange(len(model_titles))
#]
#make_plot(
# data = dpt_dict[lmr_key][0] + [ pseudo_point ] * len(model_titles),
# properties = dpt_dict[lmr_key][1] + model_props,
# titles = dpt_dict[lmr_key][2] + model_titles,
# name = os.path.join(outDir, 'ptspecLMR'),
# ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
# xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
# ylog = True, xr = [0, 2.0], yr = [1e-8, 100],
# lmargin = 0.15, bmargin = 0.08, rmargin = 0.98, tmargin = 0.84,
# key = ['maxrows 4', 'samplen 0.7', 'width -2', 'at graph 1.,1.2'],
# arrow_bar = 0.005, size = '10in,13in',
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.03,False],
# 'STAR Preliminary': [0.05,0.07,False],
# }
#)
# make mean pt plot
#yMinPt, yMaxPt = 0.95*min(yvalsPt), 1.05*max(yvalsPt)
#make_plot(
# data = [ # cocktail
# np.array(data_avpt[k+'_c']) for k in mee_keys
# ] + [ # medium
# np.array(data_avpt['LMR_m'])
# ] + [ # data
# np.array(data_avpt[k]) for k in mee_keys
# ],
# properties = [
# 'with lines lt 1 lw 4 lc %s' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ] + [
# 'with lines lt 2 lw 4 lc %s' % default_colors[mee_keys.index('LMR')]
# ] + [
# 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ],
# titles = [ getMeeLabel(k) for k in mee_keys ] + ['']*(len(mee_keys)+1),
# name = os.path.join(outDir, 'meanPt'),
# xlabel = '{/Symbol \326}s_{NN} (GeV)',
# ylabel = '{/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
# xlog = True, xr = [17,220], yr = [yMinPt, yMaxPt], size = '11in,9in',
# key = [ 'maxrows 1', 'at graph 1, 1.1' ],
# lmargin = 0.11, bmargin = 0.11, tmargin = 1., rmargin = 1.,
# gpcalls = [
# 'format x "%g"',
# 'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
# ]
#)
## make mean pt plot for LMR only
#make_plot(
# data = [
# np.array(data_avpt['LMR_c']),
# np.array(data_avpt['LMR_m']),
# np.array(data_avpt['LMR'])
# ],
# properties = [
# 'with lines lt 2 lw 4 lc %s' % default_colors[0],
# 'with lines lt 1 lw 4 lc %s' % default_colors[0],
# 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[0]
# ],
# titles = [
# 'cocktail', 'HMBT', getMeeLabel('data')
# ],
# name = os.path.join(outDir, 'meanPtLMR'),
# xlabel = '{/Symbol \326}s_{NN} (GeV)',
# ylabel = 'LMR {/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
# lmargin = 0.17, bmargin = 0.15, tmargin = 0.95, xlog = True, xr = [17,80],
# yr = [0.65,1.05], #yr = [yMinPt, yMaxPt],
# key = [ 'bottom right' ],
# gpcalls = [
# 'format x "%g"',
# 'xtics (20, 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
# ],
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.07,False],
# '0.4 < p_{T} < 2.2 GeV/c': [0.05,0.14,False]
# }
#)
return 'done'
|
def function[gp_ptspec, parameter[]]:
constant[example for a 2D-panel plot etc.]
variable[fenergies] assign[=] list[[<ast.Constant object at 0x7da1b1453e20>, <ast.Constant object at 0x7da1b1453df0>, <ast.Constant object at 0x7da1b1453dc0>, <ast.Constant object at 0x7da1b1453d90>]]
variable[nen] assign[=] call[name[len], parameter[name[fenergies]]]
variable[mee_keys] assign[=] list[[<ast.Constant object at 0x7da1b1453bb0>, <ast.Constant object at 0x7da1b1453b80>, <ast.Constant object at 0x7da1b1453b50>, <ast.Constant object at 0x7da1b1453b20>, <ast.Constant object at 0x7da1b1453af0>, <ast.Constant object at 0x7da1b1453ac0>]]
variable[mee_dict] assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da1b14539d0>]]
variable[yscale] assign[=] dictionary[[<ast.Constant object at 0x7da1b1453790>, <ast.Constant object at 0x7da1b1453760>, <ast.Constant object at 0x7da1b1453730>, <ast.Constant object at 0x7da1b1453700>, <ast.Constant object at 0x7da1b14536d0>], [<ast.Constant object at 0x7da1b1453670>, <ast.Constant object at 0x7da1b1453640>, <ast.Constant object at 0x7da1b1453610>, <ast.Constant object at 0x7da1b14535e0>, <ast.Constant object at 0x7da1b14535b0>]]
<ast.Tuple object at 0x7da1b1453550> assign[=] call[name[getWorkDirs], parameter[]]
<ast.Tuple object at 0x7da1b1453430> assign[=] tuple[[<ast.Dict object at 0x7da1b1453340>, <ast.Dict object at 0x7da1b1453310>, <ast.Dict object at 0x7da1b14532e0>]]
<ast.Tuple object at 0x7da1b1453280> assign[=] tuple[[<ast.List object at 0x7da1b14531c0>, <ast.List object at 0x7da1b1453190>]]
variable[scale] assign[=] dictionary[[<ast.Constant object at 0x7da1b14530a0>, <ast.Constant object at 0x7da1b1453070>, <ast.Constant object at 0x7da1b1453040>, <ast.Constant object at 0x7da1b1453010>, <ast.Constant object at 0x7da1b1452fe0>], [<ast.Constant object at 0x7da1b1452f80>, <ast.Constant object at 0x7da1b1452f50>, <ast.Constant object at 0x7da1b1452f20>, <ast.Constant object at 0x7da1b1452ef0>, <ast.Constant object at 0x7da1b1452ec0>]]
variable[lmr_label] assign[=] constant[None]
for taget[name[filename]] in starred[call[name[os].listdir, parameter[name[inDir]]]] begin[:]
variable[file_url] assign[=] call[name[os].path.join, parameter[name[inDir], name[filename]]]
variable[filebase] assign[=] call[call[name[os].path.splitext, parameter[name[filename]]]][constant[0]]
<ast.Tuple object at 0x7da1b1452980> assign[=] call[name[splitFileName], parameter[name[filebase]]]
if compare[name[mee_name] equal[==] constant[LMR]] begin[:]
variable[mee_range_split] assign[=] call[name[map], parameter[name[float], call[name[mee_range].split, parameter[constant[-]]]]]
variable[lmr_label] assign[=] binary_operation[constant[LMR: %g < M_{ee} < %g GeV/c^{2}] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1450040>, <ast.Subscript object at 0x7da1b1325cf0>]]]
if compare[name[energy] equal[==] constant[200]] begin[:]
continue
if compare[name[mee_name] <ast.NotIn object at 0x7da2590d7190> name[mee_keys]] begin[:]
continue
call[name[mee_dict]][name[mee_name]] assign[=] name[mee_range]
call[name[data]][name[filebase]] assign[=] call[name[np].loadtxt, parameter[call[name[open], parameter[name[file_url], constant[rb]]]]]
if compare[name[data_type] equal[==] constant[data]] begin[:]
call[name[data]][name[filebase]] assign[=] call[call[name[data]][name[filebase]]][<ast.Slice object at 0x7da1b1325300>]
if compare[name[energy] equal[==] constant[200]] begin[:]
<ast.AugAssign object at 0x7da1b1324a30>
variable[mask] assign[=] binary_operation[compare[call[call[name[data]][name[filebase]]][tuple[[<ast.Slice object at 0x7da1b1327c40>, <ast.Constant object at 0x7da1b1327c70>]]] greater[>] constant[0.4]] <ast.BitAnd object at 0x7da2590d6b60> compare[call[call[name[data]][name[filebase]]][tuple[[<ast.Slice object at 0x7da1b1325870>, <ast.Constant object at 0x7da1b1327dc0>]]] less[<] constant[2.2]]]
variable[avpt_data] assign[=] call[call[name[data]][name[filebase]]][name[mask]]
variable[pTs] assign[=] call[name[avpt_data]][tuple[[<ast.Slice object at 0x7da1b1326ce0>, <ast.Constant object at 0x7da1b13262c0>]]]
variable[wghts] assign[=] call[name[avpt_data]][tuple[[<ast.Slice object at 0x7da1b1326470>, <ast.Constant object at 0x7da1b1326320>]]]
variable[probs] assign[=] call[name[unp].uarray, parameter[call[name[avpt_data]][tuple[[<ast.Slice object at 0x7da1b1326440>, <ast.Constant object at 0x7da1b1326650>]]], call[name[avpt_data]][tuple[[<ast.Slice object at 0x7da1b1326f80>, <ast.Constant object at 0x7da1b1326770>]]]]]
<ast.AugAssign object at 0x7da1b1326680>
variable[avpt] assign[=] call[name[umath].fsum, parameter[binary_operation[name[pTs] * name[probs]]]]
call[name[logging].info, parameter[call[binary_operation[constant[%s: {} %g] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1326aa0>, <ast.Call object at 0x7da1b1326ad0>]]].format, parameter[name[avpt]]]]]
variable[dp] assign[=] list[[<ast.Call object at 0x7da1b1327af0>, <ast.Attribute object at 0x7da1b1326ec0>, <ast.Constant object at 0x7da1b1326e30>, <ast.Attribute object at 0x7da1b1326dd0>, <ast.Constant object at 0x7da1b1326e90>]]
variable[avpt_key] assign[=] name[mee_name]
if compare[name[data_type] equal[==] constant[cocktail]] begin[:]
<ast.AugAssign object at 0x7da1b1327b20>
if compare[name[data_type] equal[==] constant[medium]] begin[:]
<ast.AugAssign object at 0x7da1b1327bb0>
if compare[name[data_type] equal[==] constant[mediumMedOnly]] begin[:]
<ast.AugAssign object at 0x7da1b1327280>
if compare[name[data_type] equal[==] constant[mediumQgpOnly]] begin[:]
<ast.AugAssign object at 0x7da1b1327370>
if compare[name[avpt_key] in name[data_avpt]] begin[:]
call[call[name[data_avpt]][name[avpt_key]].append, parameter[name[dp]]]
call[name[yvalsPt].append, parameter[name[avpt].nominal_value]]
if compare[name[data_type] not_equal[!=] constant[data]] begin[:]
<ast.AugAssign object at 0x7da1b1324820>
<ast.AugAssign object at 0x7da1b13242e0>
if <ast.BoolOp object at 0x7da1b1324430> begin[:]
call[call[name[data]][name[filebase]]][tuple[[<ast.Slice object at 0x7da1b1325b40>, <ast.Slice object at 0x7da1b1325bd0>]]] assign[=] constant[0.0]
<ast.AugAssign object at 0x7da1b1327a00>
variable[dpt_dict_key] assign[=] call[name[getSubplotTitle], parameter[name[mee_name], name[mee_range]]]
if compare[name[dpt_dict_key] <ast.NotIn object at 0x7da2590d7190> name[dpt_dict]] begin[:]
variable[ndsets] assign[=] binary_operation[name[nen] * constant[2]]
if compare[name[mee_name] equal[==] constant[LMR]] begin[:]
<ast.AugAssign object at 0x7da1b1440850>
call[name[dpt_dict]][name[dpt_dict_key]] assign[=] list[[<ast.BinOp object at 0x7da1b14406d0>, <ast.BinOp object at 0x7da1b14412d0>, <ast.BinOp object at 0x7da1b1441360>]]
variable[enidx] assign[=] call[name[fenergies].index, parameter[name[energy]]]
variable[dsidx] assign[=] name[enidx]
if call[name[fnmatch], parameter[name[data_type], constant[*medium*]]] begin[:]
variable[dsidx] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[compare[name[energy] equal[==] constant[19]] * constant[0]] + binary_operation[compare[name[energy] equal[==] constant[27]] * constant[3]]] + binary_operation[compare[name[energy] equal[==] constant[39]] * constant[6]]] + binary_operation[compare[name[energy] equal[==] constant[62]] * constant[9]]]
<ast.AugAssign object at 0x7da1b1440b50>
<ast.AugAssign object at 0x7da1b14426e0>
<ast.AugAssign object at 0x7da1b1442aa0>
call[call[call[name[dpt_dict]][name[dpt_dict_key]]][constant[0]]][name[dsidx]] assign[=] call[name[data]][name[filebase]]
if compare[name[data_type] equal[==] constant[data]] begin[:]
call[call[call[name[dpt_dict]][name[dpt_dict_key]]][constant[1]]][name[dsidx]] assign[=] binary_operation[constant[lt 1 lw 4 ps 1.5 lc %s pt 18] <ast.Mod object at 0x7da2590d6920> call[name[default_colors]][name[enidx]]]
call[call[call[name[dpt_dict]][name[dpt_dict_key]]][constant[2]]][name[dsidx]] assign[=] <ast.IfExp object at 0x7da1b1442020>
variable[plot_key_order] assign[=] call[name[dpt_dict].keys, parameter[]]
call[name[plot_key_order].sort, parameter[]]
for taget[name[k]] in starred[name[data_avpt]] begin[:]
call[call[name[data_avpt]][name[k]].sort, parameter[]]
variable[energies] assign[=] <ast.ListComp object at 0x7da1b14e6bc0>
call[name[energies].append, parameter[constant[215.0]]]
variable[linsp] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b14e5180>, <ast.Name object at 0x7da1b14e5fc0>]]] in starred[call[name[zip], parameter[call[name[energies]][<ast.Slice object at 0x7da1b14e69b0>], call[name[energies]][<ast.Slice object at 0x7da1b14e6b90>]]]] begin[:]
call[name[linsp]][name[start]] assign[=] call[name[np].linspace, parameter[name[start], name[stop]]]
for taget[name[k]] in starred[name[data_avpt]] begin[:]
variable[key] assign[=] call[call[name[k].split, parameter[constant[_]]]][constant[0]]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[call[name[data_avpt]][name[k]]]]]]] begin[:]
call[call[call[name[data_avpt]][name[k]]][name[i]]][constant[0]] assign[=] call[call[name[linsp]][call[name[energies]][name[i]]]][call[name[mee_keys].index, parameter[name[key]]]]
<ast.Tuple object at 0x7da1b14e4880> assign[=] tuple[[<ast.BinOp object at 0x7da1b14e7040>, <ast.BinOp object at 0x7da1b14e6020>]]
call[name[make_panel], parameter[]]
return[constant[done]]
|
keyword[def] identifier[gp_ptspec] ():
literal[string]
identifier[fenergies] =[ literal[string] , literal[string] , literal[string] , literal[string] ,]
identifier[nen] = identifier[len] ( identifier[fenergies] )
identifier[mee_keys] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[mee_dict] = identifier[OrderedDict] (( identifier[k] , literal[string] ) keyword[for] identifier[k] keyword[in] identifier[mee_keys] )
identifier[yscale] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
identifier[inDir] , identifier[outDir] = identifier[getWorkDirs] ()
identifier[data] , identifier[data_avpt] , identifier[dpt_dict] ={},{},{}
identifier[yvals] , identifier[yvalsPt] =[],[]
identifier[scale] ={
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int]
}
identifier[lmr_label] = keyword[None]
keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[inDir] ):
identifier[file_url] = identifier[os] . identifier[path] . identifier[join] ( identifier[inDir] , identifier[filename] )
identifier[filebase] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[ literal[int] ]
identifier[energy] , identifier[mee_name] , identifier[mee_range] , identifier[data_type] = identifier[splitFileName] ( identifier[filebase] )
keyword[if] identifier[mee_name] == literal[string] :
identifier[mee_range_split] = identifier[map] ( identifier[float] , identifier[mee_range] . identifier[split] ( literal[string] ))
identifier[lmr_label] = literal[string] %(
identifier[mee_range_split] [ literal[int] ], identifier[mee_range_split] [ literal[int] ]
)
keyword[if] identifier[energy] == literal[string] : keyword[continue]
keyword[if] identifier[mee_name] keyword[not] keyword[in] identifier[mee_keys] : keyword[continue]
identifier[mee_dict] [ identifier[mee_name] ]= identifier[mee_range]
identifier[data] [ identifier[filebase] ]= identifier[np] . identifier[loadtxt] ( identifier[open] ( identifier[file_url] , literal[string] ))
keyword[if] identifier[data_type] == literal[string] :
identifier[data] [ identifier[filebase] ]= identifier[data] [ identifier[filebase] ][:- literal[int] ]
keyword[if] identifier[energy] == literal[string] : identifier[data] [ identifier[filebase] ][:,( literal[int] , literal[int] , literal[int] )]/= literal[int]
identifier[mask] =( identifier[data] [ identifier[filebase] ][:, literal[int] ]> literal[int] )&( identifier[data] [ identifier[filebase] ][:, literal[int] ]< literal[int] )
identifier[avpt_data] = identifier[data] [ identifier[filebase] ][ identifier[mask] ]
identifier[pTs] = identifier[avpt_data] [:, literal[int] ]
identifier[wghts] = identifier[avpt_data] [:, literal[int] ]
identifier[probs] = identifier[unp] . identifier[uarray] ( identifier[avpt_data] [:, literal[int] ], identifier[avpt_data] [:, literal[int] ])
identifier[probs] /= identifier[umath] . identifier[fsum] ( identifier[probs] )
identifier[avpt] = identifier[umath] . identifier[fsum] ( identifier[pTs] * identifier[probs] )
identifier[logging] . identifier[info] (( literal[string] %(
identifier[filebase] , identifier[np] . identifier[average] ( identifier[pTs] , identifier[weights] = identifier[wghts] )
)). identifier[format] ( identifier[avpt] ))
identifier[dp] =[ identifier[float] ( identifier[getEnergy4Key] ( identifier[energy] )), identifier[avpt] . identifier[nominal_value] , literal[int] , identifier[avpt] . identifier[std_dev] , literal[int] ]
identifier[avpt_key] = identifier[mee_name]
keyword[if] identifier[data_type] == literal[string] : identifier[avpt_key] += literal[string]
keyword[if] identifier[data_type] == literal[string] : identifier[avpt_key] += literal[string]
keyword[if] identifier[data_type] == literal[string] : identifier[avpt_key] += literal[string]
keyword[if] identifier[data_type] == literal[string] : identifier[avpt_key] += literal[string]
keyword[if] identifier[avpt_key] keyword[in] identifier[data_avpt] : identifier[data_avpt] [ identifier[avpt_key] ]. identifier[append] ( identifier[dp] )
keyword[else] : identifier[data_avpt] [ identifier[avpt_key] ]=[ identifier[dp] ]
identifier[yvalsPt] . identifier[append] ( identifier[avpt] . identifier[nominal_value] )
keyword[if] identifier[data_type] != literal[string] :
identifier[data] [ identifier[filebase] ][:,( literal[int] , literal[int] , literal[int] )]/= identifier[scale] [ identifier[energy] ]
identifier[data] [ identifier[filebase] ][:,( literal[int] , literal[int] , literal[int] )]*= identifier[float] ( identifier[yscale] [ identifier[energy] ])
keyword[if] identifier[data_type] == literal[string] keyword[or] identifier[fnmatch] ( identifier[data_type] , literal[string] ):
identifier[data] [ identifier[filebase] ][:, literal[int] :]= literal[int]
identifier[yvals] +=[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[data] [ identifier[filebase] ][:, literal[int] ] keyword[if] identifier[v] > literal[int] ]
identifier[dpt_dict_key] = identifier[getSubplotTitle] ( identifier[mee_name] , identifier[mee_range] )
keyword[if] identifier[dpt_dict_key] keyword[not] keyword[in] identifier[dpt_dict] :
identifier[ndsets] = identifier[nen] * literal[int]
keyword[if] identifier[mee_name] == literal[string] : identifier[ndsets] += literal[int] * literal[int]
identifier[dpt_dict] [ identifier[dpt_dict_key] ]=[[ keyword[None] ]* identifier[ndsets] ,[ keyword[None] ]* identifier[ndsets] ,[ keyword[None] ]* identifier[ndsets] ]
identifier[enidx] = identifier[fenergies] . identifier[index] ( identifier[energy] )
identifier[dsidx] = identifier[enidx]
keyword[if] identifier[fnmatch] ( identifier[data_type] , literal[string] ):
identifier[dsidx] =( identifier[energy] == literal[string] )* literal[int] +( identifier[energy] == literal[string] )* literal[int] +( identifier[energy] == literal[string] )* literal[int] +( identifier[energy] == literal[string] )* literal[int]
identifier[dsidx] +=( identifier[data_type] == literal[string] )* literal[int] +( identifier[data_type] == literal[string] )* literal[int]
identifier[dsidx] +=( identifier[data_type] == literal[string] )* literal[int]
keyword[else] :
identifier[dsidx] += identifier[int] ( identifier[mee_name] == literal[string] )* literal[int] * literal[int]
identifier[dsidx] += identifier[int] ( identifier[data_type] == literal[string] )* identifier[len] ( identifier[fenergies] )
identifier[dpt_dict] [ identifier[dpt_dict_key] ][ literal[int] ][ identifier[dsidx] ]= identifier[data] [ identifier[filebase] ]
keyword[if] identifier[data_type] == literal[string] :
identifier[dpt_dict] [ identifier[dpt_dict_key] ][ literal[int] ][ identifier[dsidx] ]= literal[string] % identifier[default_colors] [ identifier[enidx] ]
keyword[elif] identifier[data_type] == literal[string] :
identifier[dpt_dict] [ identifier[dpt_dict_key] ][ literal[int] ][ identifier[dsidx] ]= literal[string] % identifier[default_colors] [ identifier[enidx] ]
keyword[else] :
identifier[dpt_dict] [ identifier[dpt_dict_key] ][ literal[int] ][ identifier[dsidx] ]= literal[string] %(
literal[int] +( identifier[data_type] == literal[string] )+( identifier[data_type] == literal[string] )* literal[int] , identifier[default_colors] [ identifier[enidx] ]
)
identifier[dpt_dict] [ identifier[dpt_dict_key] ][ literal[int] ][ identifier[dsidx] ]= literal[string] . identifier[join] ([
identifier[getEnergy4Key] ( identifier[energy] ), literal[string] , literal[string] %(
identifier[Decimal] ( identifier[yscale] [ identifier[energy] ])
)
]) keyword[if] identifier[data_type] == literal[string] keyword[else] literal[string]
identifier[plot_key_order] = identifier[dpt_dict] . identifier[keys] ()
identifier[plot_key_order] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[float] ( identifier[x] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]))
keyword[for] identifier[k] keyword[in] identifier[data_avpt] : identifier[data_avpt] [ identifier[k] ]. identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])
identifier[energies] =[ identifier[dp] [ literal[int] ] keyword[for] identifier[dp] keyword[in] identifier[data_avpt] [ identifier[mee_keys] [ literal[int] ]]]
identifier[energies] . identifier[append] ( literal[int] )
identifier[linsp] ={}
keyword[for] identifier[start] , identifier[stop] keyword[in] identifier[zip] ( identifier[energies] [:- literal[int] ], identifier[energies] [ literal[int] :]):
identifier[linsp] [ identifier[start] ]= identifier[np] . identifier[linspace] ( identifier[start] , identifier[stop] , identifier[num] = literal[int] * identifier[len] ( identifier[mee_keys] ))
keyword[for] identifier[k] keyword[in] identifier[data_avpt] :
identifier[key] = identifier[k] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[data_avpt] [ identifier[k] ])):
identifier[data_avpt] [ identifier[k] ][ identifier[i] ][ literal[int] ]= identifier[linsp] [ identifier[energies] [ identifier[i] ]][ identifier[mee_keys] . identifier[index] ( identifier[key] )]
identifier[yMin] , identifier[yMax] = literal[int] * identifier[min] ( identifier[yvals] ), literal[int] * identifier[max] ( identifier[yvals] )
identifier[make_panel] (
identifier[dpt_dict] = identifier[OrderedDict] (( identifier[k] , identifier[dpt_dict] [ identifier[k] ]) keyword[for] identifier[k] keyword[in] identifier[plot_key_order] ),
identifier[name] = identifier[os] . identifier[path] . identifier[join] ( identifier[outDir] , literal[string] ),
identifier[ylabel] = literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[ylog] = keyword[True] , identifier[xr] =[ literal[int] , literal[int] ], identifier[yr] =[ literal[int] , literal[int] ],
identifier[key] =[ literal[string] , literal[string] , literal[string] , literal[string] ],
identifier[arrow_bar] = literal[int] , identifier[layout] = literal[string] , identifier[size] = literal[string]
)
keyword[return] literal[string]
|
def gp_ptspec():
"""example for a 2D-panel plot etc."""
fenergies = ['19', '27', '39', '62'] # '200']
nen = len(fenergies)
mee_keys = ['pi0', 'LMR', 'omega', 'phi', 'IMR', 'jpsi']
#mee_keys = ['LMR', ]
mee_dict = OrderedDict(((k, '') for k in mee_keys))
yscale = {'200': '300', '62': '5000', '39': '50', '27': '0.3', '19': '0.001'}
(inDir, outDir) = getWorkDirs()
(data, data_avpt, dpt_dict) = ({}, {}, {})
(yvals, yvalsPt) = ([], [])
scale = {'19': 1.3410566491548412, '200': 1.0, '39': 1.2719203877292842, '27': 1.350873678084769, '62': 1.2664666321635087}
lmr_label = None
for filename in os.listdir(inDir):
# import data
file_url = os.path.join(inDir, filename)
filebase = os.path.splitext(filename)[0] # unique
(energy, mee_name, mee_range, data_type) = splitFileName(filebase)
if mee_name == 'LMR':
mee_range_split = map(float, mee_range.split('-'))
lmr_label = 'LMR: %g < M_{ee} < %g GeV/c^{2}' % (mee_range_split[0], mee_range_split[1]) # depends on [control=['if'], data=[]]
if energy == '200':
continue # depends on [control=['if'], data=[]]
if mee_name not in mee_keys:
continue # depends on [control=['if'], data=[]]
mee_dict[mee_name] = mee_range
data[filebase] = np.loadtxt(open(file_url, 'rb'))
if data_type == 'data':
#print data[filebase]
data[filebase] = data[filebase][:-1] # skip mT<0.4 point # depends on [control=['if'], data=[]]
if energy == '200':
data[filebase][:, (1, 3, 4)] /= 0.5 # depends on [control=['if'], data=[]]
# calculate average pT first
mask = (data[filebase][:, 0] > 0.4) & (data[filebase][:, 0] < 2.2)
avpt_data = data[filebase][mask]
pTs = avpt_data[:, 0]
wghts = avpt_data[:, 1]
probs = unp.uarray(avpt_data[:, 1], avpt_data[:, 3]) # dN/pT
probs /= umath.fsum(probs) # probabilities
avpt = umath.fsum(pTs * probs)
logging.info(('%s: {} %g' % (filebase, np.average(pTs, weights=wghts))).format(avpt)) # TODO: syst. uncertainties
# save datapoint for average pT and append to yvalsPt for yaxis range
dp = [float(getEnergy4Key(energy)), avpt.nominal_value, 0.0, avpt.std_dev, 0.0]
avpt_key = mee_name
if data_type == 'cocktail':
avpt_key += '_c' # depends on [control=['if'], data=[]]
if data_type == 'medium':
avpt_key += '_m' # depends on [control=['if'], data=[]]
if data_type == 'mediumMedOnly':
avpt_key += '_mMed' # depends on [control=['if'], data=[]]
if data_type == 'mediumQgpOnly':
avpt_key += '_mQgp' # depends on [control=['if'], data=[]]
if avpt_key in data_avpt:
data_avpt[avpt_key].append(dp) # depends on [control=['if'], data=['avpt_key', 'data_avpt']]
else:
data_avpt[avpt_key] = [dp]
yvalsPt.append(avpt.nominal_value)
# now adjust data for panel plot and append to yvals
if data_type != 'data':
data[filebase][:, (1, 3, 4)] /= scale[energy] # depends on [control=['if'], data=[]]
data[filebase][:, (1, 3, 4)] *= float(yscale[energy])
if data_type == 'cocktail' or fnmatch(data_type, '*medium*'):
data[filebase][:, 2:] = 0.0 # depends on [control=['if'], data=[]]
yvals += [v for v in data[filebase][:, 1] if v > 0]
# prepare dict for panel plot
dpt_dict_key = getSubplotTitle(mee_name, mee_range)
if dpt_dict_key not in dpt_dict:
ndsets = nen * 2
# TODO: currently only 19/39/62 medium avail. w/ med/qgp/tot for each
# July14: all energies available; TODO: fix dsidx
if mee_name == 'LMR':
ndsets += 4 * 3 # depends on [control=['if'], data=[]]
dpt_dict[dpt_dict_key] = [[None] * ndsets, [None] * ndsets, [None] * ndsets] # depends on [control=['if'], data=['dpt_dict_key', 'dpt_dict']]
enidx = fenergies.index(energy)
dsidx = enidx
if fnmatch(data_type, '*medium*'):
# 19: 0-2, 27: 3-5, 39: 6-8, 62: 9-11
dsidx = (energy == '19') * 0 + (energy == '27') * 3 + (energy == '39') * 6 + (energy == '62') * 9
dsidx += (data_type == 'mediumQgpOnly') * 0 + (data_type == 'mediumMedOnly') * 1
dsidx += (data_type == 'medium') * 2 # depends on [control=['if'], data=[]]
else:
dsidx += int(mee_name == 'LMR') * 4 * 3 # number of medium calc avail.
dsidx += int(data_type == 'data') * len(fenergies)
dpt_dict[dpt_dict_key][0][dsidx] = data[filebase] # data
if data_type == 'data': # properties
dpt_dict[dpt_dict_key][1][dsidx] = 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[enidx] # depends on [control=['if'], data=[]]
elif data_type == 'medium':
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt 1 lw 5 lc %s' % default_colors[enidx] # depends on [control=['if'], data=[]]
else:
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt %d lw 5 lc %s' % (2 + (data_type == 'mediumMedOnly') + (data_type == 'mediumQgpOnly') * 2, default_colors[enidx]) # legend titles
#.as_tuple().exponent
dpt_dict[dpt_dict_key][2][dsidx] = ' '.join([getEnergy4Key(energy), 'GeV', '{/Symbol ´} %g' % Decimal(yscale[energy])]) if data_type == 'data' else '' # depends on [control=['for'], data=['filename']]
# use mass range in dict key to sort dpt_dict with increasing mass
plot_key_order = dpt_dict.keys()
plot_key_order.sort(key=lambda x: float(x.split(':')[1].split('-')[0]))
# sort data_avpt by energy and apply x-shift for better visibility
for k in data_avpt:
data_avpt[k].sort(key=lambda x: x[0]) # depends on [control=['for'], data=['k']]
energies = [dp[0] for dp in data_avpt[mee_keys[0]]]
energies.append(215.0) # TODO: think of better upper limit
linsp = {}
for (start, stop) in zip(energies[:-1], energies[1:]):
linsp[start] = np.linspace(start, stop, num=4 * len(mee_keys)) # depends on [control=['for'], data=[]]
for k in data_avpt:
key = k.split('_')[0]
for i in xrange(len(data_avpt[k])):
data_avpt[k][i][0] = linsp[energies[i]][mee_keys.index(key)] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['k']]
# make panel plot
(yMin, yMax) = (0.5 * min(yvals), 3 * max(yvals))
#lmargin = 0.12, bmargin = 0.10, tmargin = 1., rmargin = 1.,
make_panel(dpt_dict=OrderedDict(((k, dpt_dict[k]) for k in plot_key_order)), name=os.path.join(outDir, 'ptspec'), ylabel='1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)', xlabel='dielectron transverse momentum, p_{T} (GeV/c)', ylog=True, xr=[0, 2.2], yr=[1e-09, 10000.0], key=['bottom left', 'samplen 0.5', 'width -2', 'opaque'], arrow_bar=0.002, layout='3x2', size='8in,8in')
#make plot for LMR spectra only
#lmr_key = getSubplotTitle('LMR', '0.4-0.76')
#if energy == '200':
# lmr_key = getSubplotTitle('LMR', '0.3-0.76')
#pseudo_point = np.array([[-1,0,0,0,0]])
#model_titles = ['Cocktail + Model', 'Cocktail', 'in-Medium', 'QGP']
#model_props = [
# 'with lines lc %s lw 5 lt %d' % (default_colors[-2], i+1)
# for i in xrange(len(model_titles))
#]
#make_plot(
# data = dpt_dict[lmr_key][0] + [ pseudo_point ] * len(model_titles),
# properties = dpt_dict[lmr_key][1] + model_props,
# titles = dpt_dict[lmr_key][2] + model_titles,
# name = os.path.join(outDir, 'ptspecLMR'),
# ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
# xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
# ylog = True, xr = [0, 2.0], yr = [1e-8, 100],
# lmargin = 0.15, bmargin = 0.08, rmargin = 0.98, tmargin = 0.84,
# key = ['maxrows 4', 'samplen 0.7', 'width -2', 'at graph 1.,1.2'],
# arrow_bar = 0.005, size = '10in,13in',
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.03,False],
# 'STAR Preliminary': [0.05,0.07,False],
# }
#)
# make mean pt plot
#yMinPt, yMaxPt = 0.95*min(yvalsPt), 1.05*max(yvalsPt)
#make_plot(
# data = [ # cocktail
# np.array(data_avpt[k+'_c']) for k in mee_keys
# ] + [ # medium
# np.array(data_avpt['LMR_m'])
# ] + [ # data
# np.array(data_avpt[k]) for k in mee_keys
# ],
# properties = [
# 'with lines lt 1 lw 4 lc %s' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ] + [
# 'with lines lt 2 lw 4 lc %s' % default_colors[mee_keys.index('LMR')]
# ] + [
# 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ],
# titles = [ getMeeLabel(k) for k in mee_keys ] + ['']*(len(mee_keys)+1),
# name = os.path.join(outDir, 'meanPt'),
# xlabel = '{/Symbol \326}s_{NN} (GeV)',
# ylabel = '{/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
# xlog = True, xr = [17,220], yr = [yMinPt, yMaxPt], size = '11in,9in',
# key = [ 'maxrows 1', 'at graph 1, 1.1' ],
# lmargin = 0.11, bmargin = 0.11, tmargin = 1., rmargin = 1.,
# gpcalls = [
# 'format x "%g"',
# 'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
# ]
#)
## make mean pt plot for LMR only
#make_plot(
# data = [
# np.array(data_avpt['LMR_c']),
# np.array(data_avpt['LMR_m']),
# np.array(data_avpt['LMR'])
# ],
# properties = [
# 'with lines lt 2 lw 4 lc %s' % default_colors[0],
# 'with lines lt 1 lw 4 lc %s' % default_colors[0],
# 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[0]
# ],
# titles = [
# 'cocktail', 'HMBT', getMeeLabel('data')
# ],
# name = os.path.join(outDir, 'meanPtLMR'),
# xlabel = '{/Symbol \326}s_{NN} (GeV)',
# ylabel = 'LMR {/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
# lmargin = 0.17, bmargin = 0.15, tmargin = 0.95, xlog = True, xr = [17,80],
# yr = [0.65,1.05], #yr = [yMinPt, yMaxPt],
# key = [ 'bottom right' ],
# gpcalls = [
# 'format x "%g"',
# 'xtics (20, 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
# ],
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.07,False],
# '0.4 < p_{T} < 2.2 GeV/c': [0.05,0.14,False]
# }
#)
return 'done'
|
def build_joins(self):
    """
    Generates the sql for the JOIN portion of the query

    :return: the JOIN portion of the query followed by a trailing
        space (so callers can concatenate the next clause directly),
        or an empty string when there are no joins
    :rtype: str
    """
    # Render each join object into its sql fragment.
    join_parts = [join_item.get_sql() for join_item in self.joins]
    # if there are any joins, combine them
    if join_parts:
        # Keep the trailing space: callers rely on it when appending
        # the next clause of the query.
        return '{0} '.format(' '.join(join_parts))
    return ''
|
def function[build_joins, parameter[self]]:
constant[
Generates the sql for the JOIN portion of the query
:return: the JOIN portion of the query
:rtype: str
]
variable[join_parts] assign[=] list[[]]
for taget[name[join_item]] in starred[name[self].joins] begin[:]
call[name[join_parts].append, parameter[call[name[join_item].get_sql, parameter[]]]]
if call[name[len], parameter[name[join_parts]]] begin[:]
variable[combined_joins] assign[=] call[constant[ ].join, parameter[name[join_parts]]]
return[call[constant[{0} ].format, parameter[name[combined_joins]]]]
return[constant[]]
|
keyword[def] identifier[build_joins] ( identifier[self] ):
literal[string]
identifier[join_parts] =[]
keyword[for] identifier[join_item] keyword[in] identifier[self] . identifier[joins] :
identifier[join_parts] . identifier[append] ( identifier[join_item] . identifier[get_sql] ())
keyword[if] identifier[len] ( identifier[join_parts] ):
identifier[combined_joins] = literal[string] . identifier[join] ( identifier[join_parts] )
keyword[return] literal[string] . identifier[format] ( identifier[combined_joins] )
keyword[return] literal[string]
|
def build_joins(self):
"""
Generates the sql for the JOIN portion of the query
:return: the JOIN portion of the query
:rtype: str
"""
join_parts = []
# get the sql for each join object
for join_item in self.joins:
join_parts.append(join_item.get_sql()) # depends on [control=['for'], data=['join_item']]
# if there are any joins, combine them
if len(join_parts):
combined_joins = ' '.join(join_parts)
return '{0} '.format(combined_joins) # depends on [control=['if'], data=[]]
return ''
|
def stack_lines(indices):
    """
    Turn a polyline, given as an ordered sequence of values, into
    discrete line segments by duplicating every interior value.

    Parameters
    ----------
    indices: sequence of items

    Returns
    ---------
    stacked: (n, 2) set of items

    In [1]: trimesh.util.stack_lines([0,1,2])
    Out[1]:
    array([[0, 1],
           [1, 2]])

    In [2]: trimesh.util.stack_lines([0,1,2,4,5])
    Out[2]:
    array([[0, 1],
           [1, 2],
           [2, 4],
           [4, 5]])

    In [3]: trimesh.util.stack_lines([[0,0],[1,1],[2,2], [3,3]])
    Out[3]:
    array([[0, 0],
           [1, 1],
           [1, 1],
           [2, 2],
           [2, 2],
           [3, 3]])
    """
    indices = np.asanyarray(indices)
    first = indices[0]
    # Segments of nD points keep the point dimension; flat index
    # sequences produce plain (start, end) pairs.
    if is_sequence(first):
        target_shape = (-1, len(first))
    else:
        target_shape = (-1, 2)
    # Pair every value with its successor, then flatten into segments.
    paired = np.column_stack((indices[:-1], indices[1:]))
    return paired.reshape(target_shape)
|
def function[stack_lines, parameter[indices]]:
constant[
Stack a list of values that represent a polyline into
individual line segments with duplicated consecutive values.
Parameters
----------
indices: sequence of items
Returns
---------
stacked: (n,2) set of items
In [1]: trimesh.util.stack_lines([0,1,2])
Out[1]:
array([[0, 1],
[1, 2]])
In [2]: trimesh.util.stack_lines([0,1,2,4,5])
Out[2]:
array([[0, 1],
[1, 2],
[2, 4],
[4, 5]])
In [3]: trimesh.util.stack_lines([[0,0],[1,1],[2,2], [3,3]])
Out[3]:
array([[0, 0],
[1, 1],
[1, 1],
[2, 2],
[2, 2],
[3, 3]])
]
variable[indices] assign[=] call[name[np].asanyarray, parameter[name[indices]]]
if call[name[is_sequence], parameter[call[name[indices]][constant[0]]]] begin[:]
variable[shape] assign[=] tuple[[<ast.UnaryOp object at 0x7da2044c22f0>, <ast.Call object at 0x7da2044c3df0>]]
return[call[call[name[np].column_stack, parameter[tuple[[<ast.Subscript object at 0x7da2044c3b20>, <ast.Subscript object at 0x7da2044c0a60>]]]].reshape, parameter[name[shape]]]]
|
keyword[def] identifier[stack_lines] ( identifier[indices] ):
literal[string]
identifier[indices] = identifier[np] . identifier[asanyarray] ( identifier[indices] )
keyword[if] identifier[is_sequence] ( identifier[indices] [ literal[int] ]):
identifier[shape] =(- literal[int] , identifier[len] ( identifier[indices] [ literal[int] ]))
keyword[else] :
identifier[shape] =(- literal[int] , literal[int] )
keyword[return] identifier[np] . identifier[column_stack] (( identifier[indices] [:- literal[int] ],
identifier[indices] [ literal[int] :])). identifier[reshape] ( identifier[shape] )
|
def stack_lines(indices):
"""
Stack a list of values that represent a polyline into
individual line segments with duplicated consecutive values.
Parameters
----------
indices: sequence of items
Returns
---------
stacked: (n,2) set of items
In [1]: trimesh.util.stack_lines([0,1,2])
Out[1]:
array([[0, 1],
[1, 2]])
In [2]: trimesh.util.stack_lines([0,1,2,4,5])
Out[2]:
array([[0, 1],
[1, 2],
[2, 4],
[4, 5]])
In [3]: trimesh.util.stack_lines([[0,0],[1,1],[2,2], [3,3]])
Out[3]:
array([[0, 0],
[1, 1],
[1, 1],
[2, 2],
[2, 2],
[3, 3]])
"""
indices = np.asanyarray(indices)
if is_sequence(indices[0]):
shape = (-1, len(indices[0])) # depends on [control=['if'], data=[]]
else:
shape = (-1, 2)
return np.column_stack((indices[:-1], indices[1:])).reshape(shape)
|
def gettextvalue(self, window_name, object_name, startPosition=0, endPosition=0):
    """
    Get text value

    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @param startPosition: Starting position of text to fetch
    @type: startPosition: int
    @param endPosition: Ending position of text to fetch
    @type: endPosition: int

    @return: text on success.
    @rtype: string
    """
    # NOTE(review): startPosition/endPosition are accepted for API
    # compatibility but this implementation always returns the full value.
    handle = self._get_object_handle(window_name, object_name)
    if handle.AXEnabled:
        return handle.AXValue
    raise LdtpServerException(u"Object %s state disabled" % object_name)
|
def function[gettextvalue, parameter[self, window_name, object_name, startPosition, endPosition]]:
constant[
Get text value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param startPosition: Starting position of text to fetch
@type: startPosition: int
@param endPosition: Ending position of text to fetch
@type: endPosition: int
@return: text on success.
@rtype: string
]
variable[object_handle] assign[=] call[name[self]._get_object_handle, parameter[name[window_name], name[object_name]]]
if <ast.UnaryOp object at 0x7da18f09eef0> begin[:]
<ast.Raise object at 0x7da18f09d150>
return[name[object_handle].AXValue]
|
keyword[def] identifier[gettextvalue] ( identifier[self] , identifier[window_name] , identifier[object_name] , identifier[startPosition] = literal[int] , identifier[endPosition] = literal[int] ):
literal[string]
identifier[object_handle] = identifier[self] . identifier[_get_object_handle] ( identifier[window_name] , identifier[object_name] )
keyword[if] keyword[not] identifier[object_handle] . identifier[AXEnabled] :
keyword[raise] identifier[LdtpServerException] ( literal[string] % identifier[object_name] )
keyword[return] identifier[object_handle] . identifier[AXValue]
|
def gettextvalue(self, window_name, object_name, startPosition=0, endPosition=0):
"""
Get text value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param startPosition: Starting position of text to fetch
@type: startPosition: int
@param endPosition: Ending position of text to fetch
@type: endPosition: int
@return: text on success.
@rtype: string
"""
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u'Object %s state disabled' % object_name) # depends on [control=['if'], data=[]]
return object_handle.AXValue
|
def expanduser(path):
    """
    Args:
        path (pathlike): A path to expand
    Returns:
        `fsnative`

    Like :func:`python:os.path.expanduser` but supports unicode home
    directories under Windows + Python 2 and always returns a `fsnative`.
    """
    path = path2fsn(path)
    if path == "~":
        return _get_userdir()
    elif path.startswith("~" + sep) or (
            altsep is not None and path.startswith("~" + altsep)):
        # "~/rest": expand the current user's home directory.
        userdir = _get_userdir()
        if userdir is None:
            return path
        return userdir + path[1:]
    elif path.startswith("~"):
        # "~user/rest": the user name ends at the first path separator
        # of either kind.
        sep_index = path.find(sep)
        if altsep is not None:
            alt_index = path.find(altsep)
            # Bug fix: also take the alternate separator when the
            # primary one is absent.  The previous condition
            # (alt_index < sep_index) always failed for sep_index == -1,
            # because any real index compares greater than -1, so
            # "~user" + altsep + "rest" swallowed the whole remainder
            # into the user name.
            if alt_index != -1 and (sep_index == -1 or alt_index < sep_index):
                sep_index = alt_index
        if sep_index == -1:
            user = path[1:]
            rest = ""
        else:
            user = path[1:sep_index]
            rest = path[sep_index:]
        userdir = _get_userdir(user)
        if userdir is not None:
            return userdir + rest
        else:
            return path
    else:
        return path
|
def function[expanduser, parameter[path]]:
constant[
Args:
path (pathlike): A path to expand
Returns:
`fsnative`
Like :func:`python:os.path.expanduser` but supports unicode home
directories under Windows + Python 2 and always returns a `fsnative`.
]
variable[path] assign[=] call[name[path2fsn], parameter[name[path]]]
if compare[name[path] equal[==] constant[~]] begin[:]
return[call[name[_get_userdir], parameter[]]]
|
keyword[def] identifier[expanduser] ( identifier[path] ):
literal[string]
identifier[path] = identifier[path2fsn] ( identifier[path] )
keyword[if] identifier[path] == literal[string] :
keyword[return] identifier[_get_userdir] ()
keyword[elif] identifier[path] . identifier[startswith] ( literal[string] + identifier[sep] ) keyword[or] (
identifier[altsep] keyword[is] keyword[not] keyword[None] keyword[and] identifier[path] . identifier[startswith] ( literal[string] + identifier[altsep] )):
identifier[userdir] = identifier[_get_userdir] ()
keyword[if] identifier[userdir] keyword[is] keyword[None] :
keyword[return] identifier[path]
keyword[return] identifier[userdir] + identifier[path] [ literal[int] :]
keyword[elif] identifier[path] . identifier[startswith] ( literal[string] ):
identifier[sep_index] = identifier[path] . identifier[find] ( identifier[sep] )
keyword[if] identifier[altsep] keyword[is] keyword[not] keyword[None] :
identifier[alt_index] = identifier[path] . identifier[find] ( identifier[altsep] )
keyword[if] identifier[alt_index] !=- literal[int] keyword[and] identifier[alt_index] < identifier[sep_index] :
identifier[sep_index] = identifier[alt_index]
keyword[if] identifier[sep_index] ==- literal[int] :
identifier[user] = identifier[path] [ literal[int] :]
identifier[rest] = literal[string]
keyword[else] :
identifier[user] = identifier[path] [ literal[int] : identifier[sep_index] ]
identifier[rest] = identifier[path] [ identifier[sep_index] :]
identifier[userdir] = identifier[_get_userdir] ( identifier[user] )
keyword[if] identifier[userdir] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[userdir] + identifier[rest]
keyword[else] :
keyword[return] identifier[path]
keyword[else] :
keyword[return] identifier[path]
|
def expanduser(path):
"""
Args:
path (pathlike): A path to expand
Returns:
`fsnative`
Like :func:`python:os.path.expanduser` but supports unicode home
directories under Windows + Python 2 and always returns a `fsnative`.
"""
path = path2fsn(path)
if path == '~':
return _get_userdir() # depends on [control=['if'], data=[]]
elif path.startswith('~' + sep) or (altsep is not None and path.startswith('~' + altsep)):
userdir = _get_userdir()
if userdir is None:
return path # depends on [control=['if'], data=[]]
return userdir + path[1:] # depends on [control=['if'], data=[]]
elif path.startswith('~'):
sep_index = path.find(sep)
if altsep is not None:
alt_index = path.find(altsep)
if alt_index != -1 and alt_index < sep_index:
sep_index = alt_index # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['altsep']]
if sep_index == -1:
user = path[1:]
rest = '' # depends on [control=['if'], data=[]]
else:
user = path[1:sep_index]
rest = path[sep_index:]
userdir = _get_userdir(user)
if userdir is not None:
return userdir + rest # depends on [control=['if'], data=['userdir']]
else:
return path # depends on [control=['if'], data=[]]
else:
return path
|
def get(self, time, interpolate='previous'):
    """Get the value of the time series, even in-between measured values.
    """
    # Validate the interpolation mode up front so the error message can
    # list every supported mode.
    if interpolate not in self.getter_functions:
        valid = ', '.join(self.getter_functions)
        raise ValueError(
            "unknown value '{}' for interpolate, "
            "valid values are in [{}]".format(interpolate, valid)
        )
    return self.getter_functions[interpolate](time)
|
def function[get, parameter[self, time, interpolate]]:
constant[Get the value of the time series, even in-between measured values.
]
<ast.Try object at 0x7da1b08baf50>
|
keyword[def] identifier[get] ( identifier[self] , identifier[time] , identifier[interpolate] = literal[string] ):
literal[string]
keyword[try] :
identifier[getter] = identifier[self] . identifier[getter_functions] [ identifier[interpolate] ]
keyword[except] identifier[KeyError] :
identifier[msg] =(
literal[string]
literal[string]
). identifier[format] ( identifier[interpolate] , literal[string] . identifier[join] ( identifier[self] . identifier[getter_functions] ))
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[else] :
keyword[return] identifier[getter] ( identifier[time] )
|
def get(self, time, interpolate='previous'):
"""Get the value of the time series, even in-between measured values.
"""
try:
getter = self.getter_functions[interpolate] # depends on [control=['try'], data=[]]
except KeyError:
msg = "unknown value '{}' for interpolate, valid values are in [{}]".format(interpolate, ', '.join(self.getter_functions))
raise ValueError(msg) # depends on [control=['except'], data=[]]
else:
return getter(time)
|
def _trigger(self):
    """
    Add stats to json and dump to disk.

    Note that this method is idempotent.
    """
    if self._stat_now:
        # Stamp the pending stats with their position in training,
        # archive them, and start a fresh dict for the next interval.
        self._stat_now.update(
            epoch_num=self.epoch_num, global_step=self.global_step)
        self._stats.append(self._stat_now)
        self._stat_now = {}
    self._write_stat()
|
def function[_trigger, parameter[self]]:
constant[
Add stats to json and dump to disk.
Note that this method is idempotent.
]
if call[name[len], parameter[name[self]._stat_now]] begin[:]
call[name[self]._stat_now][constant[epoch_num]] assign[=] name[self].epoch_num
call[name[self]._stat_now][constant[global_step]] assign[=] name[self].global_step
call[name[self]._stats.append, parameter[name[self]._stat_now]]
name[self]._stat_now assign[=] dictionary[[], []]
call[name[self]._write_stat, parameter[]]
|
keyword[def] identifier[_trigger] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[_stat_now] ):
identifier[self] . identifier[_stat_now] [ literal[string] ]= identifier[self] . identifier[epoch_num]
identifier[self] . identifier[_stat_now] [ literal[string] ]= identifier[self] . identifier[global_step]
identifier[self] . identifier[_stats] . identifier[append] ( identifier[self] . identifier[_stat_now] )
identifier[self] . identifier[_stat_now] ={}
identifier[self] . identifier[_write_stat] ()
|
def _trigger(self):
"""
Add stats to json and dump to disk.
Note that this method is idempotent.
"""
if len(self._stat_now):
self._stat_now['epoch_num'] = self.epoch_num
self._stat_now['global_step'] = self.global_step
self._stats.append(self._stat_now)
self._stat_now = {}
self._write_stat() # depends on [control=['if'], data=[]]
|
def shift_multi(
        x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1,
        channel_index=2, fill_mode='nearest', cval=0., order=1
):
    """Shift images with the same arguments, randomly or non-randomly.
    Usually be used for image segmentation which x=[X, Y], X and Y should be matched.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.shift``.

    Returns
    -------
    numpy.array
        A list of processed images.

    """
    h = x[0].shape[row_index]
    w = x[0].shape[col_index]
    if is_random:
        # Vertical offset is drawn first, then horizontal, keeping the
        # RNG consumption order identical for reproducibility.
        tx = np.random.uniform(-hrg, hrg) * h
        ty = np.random.uniform(-wrg, wrg) * w
    else:
        tx = hrg * h
        ty = wrg * w
    # Pure translation, so the matrix needs no center offset.
    transform_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
    return np.asarray([
        affine_transform(data, transform_matrix, channel_index,
                         fill_mode, cval, order)
        for data in x
    ])
|
def function[shift_multi, parameter[x, wrg, hrg, is_random, row_index, col_index, channel_index, fill_mode, cval, order]]:
constant[Shift images with the same arguments, randomly or non-randomly.
Usually be used for image segmentation which x=[X, Y], X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shift``.
Returns
-------
numpy.array
A list of processed images.
]
<ast.Tuple object at 0x7da18bc70ac0> assign[=] tuple[[<ast.Subscript object at 0x7da18bc72440>, <ast.Subscript object at 0x7da18bc72380>]]
if name[is_random] begin[:]
variable[tx] assign[=] binary_operation[call[name[np].random.uniform, parameter[<ast.UnaryOp object at 0x7da18bc71720>, name[hrg]]] * name[h]]
variable[ty] assign[=] binary_operation[call[name[np].random.uniform, parameter[<ast.UnaryOp object at 0x7da18bc70670>, name[wrg]]] * name[w]]
variable[translation_matrix] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da18bc70880>, <ast.List object at 0x7da18bc73b20>, <ast.List object at 0x7da18bc72cb0>]]]]
variable[transform_matrix] assign[=] name[translation_matrix]
variable[results] assign[=] list[[]]
for taget[name[data]] in starred[name[x]] begin[:]
call[name[results].append, parameter[call[name[affine_transform], parameter[name[data], name[transform_matrix], name[channel_index], name[fill_mode], name[cval], name[order]]]]]
return[call[name[np].asarray, parameter[name[results]]]]
|
keyword[def] identifier[shift_multi] (
identifier[x] , identifier[wrg] = literal[int] , identifier[hrg] = literal[int] , identifier[is_random] = keyword[False] , identifier[row_index] = literal[int] , identifier[col_index] = literal[int] , identifier[channel_index] = literal[int] , identifier[fill_mode] = literal[string] , identifier[cval] = literal[int] ,
identifier[order] = literal[int]
):
literal[string]
identifier[h] , identifier[w] = identifier[x] [ literal[int] ]. identifier[shape] [ identifier[row_index] ], identifier[x] [ literal[int] ]. identifier[shape] [ identifier[col_index] ]
keyword[if] identifier[is_random] :
identifier[tx] = identifier[np] . identifier[random] . identifier[uniform] (- identifier[hrg] , identifier[hrg] )* identifier[h]
identifier[ty] = identifier[np] . identifier[random] . identifier[uniform] (- identifier[wrg] , identifier[wrg] )* identifier[w]
keyword[else] :
identifier[tx] , identifier[ty] = identifier[hrg] * identifier[h] , identifier[wrg] * identifier[w]
identifier[translation_matrix] = identifier[np] . identifier[array] ([[ literal[int] , literal[int] , identifier[tx] ],[ literal[int] , literal[int] , identifier[ty] ],[ literal[int] , literal[int] , literal[int] ]])
identifier[transform_matrix] = identifier[translation_matrix]
identifier[results] =[]
keyword[for] identifier[data] keyword[in] identifier[x] :
identifier[results] . identifier[append] ( identifier[affine_transform] ( identifier[data] , identifier[transform_matrix] , identifier[channel_index] , identifier[fill_mode] , identifier[cval] , identifier[order] ))
keyword[return] identifier[np] . identifier[asarray] ( identifier[results] )
|
def shift_multi(x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.0, order=1):
    """Shift images with the same arguments, randomly or non-randomly.
    Usually be used for image segmentation which x=[X, Y], X and Y should be matched.
    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.shift``.
    Returns
    -------
    numpy.array
        A list of processed images.
    """
    # Shift offsets are computed once from the first image so every image in
    # the batch (e.g. an image and its segmentation mask) moves identically.
    h, w = x[0].shape[row_index], x[0].shape[col_index]
    if is_random:
        tx = np.random.uniform(-hrg, hrg) * h
        ty = np.random.uniform(-wrg, wrg) * w
    else:
        tx, ty = hrg * h, wrg * w
    # Pure translation; no rotation/zoom, so no offset recentering is needed.
    transform_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
    return np.asarray([
        affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order)
        for data in x
    ])
|
def _GetDirectory(self):
    """Retrieves a directory.
    Returns:
      CPIODirectory: a directory or None if not available.
    """
    # Only directory-typed entries can yield a directory object.
    if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
        return CPIODirectory(self._file_system, self.path_spec)
    return None
|
def function[_GetDirectory, parameter[self]]:
constant[Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available.
]
if compare[name[self].entry_type not_equal[!=] name[definitions].FILE_ENTRY_TYPE_DIRECTORY] begin[:]
return[constant[None]]
return[call[name[CPIODirectory], parameter[name[self]._file_system, name[self].path_spec]]]
|
keyword[def] identifier[_GetDirectory] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[entry_type] != identifier[definitions] . identifier[FILE_ENTRY_TYPE_DIRECTORY] :
keyword[return] keyword[None]
keyword[return] identifier[CPIODirectory] ( identifier[self] . identifier[_file_system] , identifier[self] . identifier[path_spec] )
|
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available.
"""
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None # depends on [control=['if'], data=[]]
return CPIODirectory(self._file_system, self.path_spec)
|
def restart(name, no_block=False, unmask=False, unmask_runtime=False):
    '''
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands run by this function from the ``salt-minion`` daemon's
        control group. This is done to avoid a race condition in cases where
        the ``salt-minion`` service is restarted while a service is being
        modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
        setting a :mod:`config option <salt.modules.config.get>` called
        ``systemd.scope``, with a value of ``False`` (no quotes).
    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    Restart the specified service with systemd
    no_block : False
        Set to ``True`` to start the service using ``--no-block``.
        .. versionadded:: 2017.7.0
    unmask : False
        Set to ``True`` to remove an indefinite mask before attempting to
        restart the service.
        .. versionadded:: 2017.7.0
            In previous releases, Salt would simply unmask a service before
            restarting. This behavior is no longer the default.
    unmask_runtime : False
        Set to ``True`` to remove a runtime mask before attempting to restart
        the service.
        .. versionadded:: 2017.7.0
            In previous releases, Salt would simply unmask a service before
            restarting. This behavior is no longer the default.
    CLI Example:
    .. code-block:: bash
        salt '*' service.restart <service name>
    '''
    _check_for_unit_changes(name)
    _check_unmask(name, unmask, unmask_runtime)
    cmd = _systemctl_cmd('restart', name, systemd_scope=True, no_block=no_block)
    result = __salt__['cmd.run_all'](cmd, python_shell=False)
    if result['retcode'] != 0:
        # Raise rather than return False so the systemd error text (e.g. for a
        # masked unit) reaches the caller.
        raise CommandExecutionError(_strip_scope(result['stderr']))
    return True
|
def function[restart, parameter[name, no_block, unmask, unmask_runtime]]:
constant[
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Restart the specified service with systemd
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: 2017.7.0
unmask : False
Set to ``True`` to remove an indefinite mask before attempting to
restart the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
restarting. This behavior is no longer the default.
unmask_runtime : False
Set to ``True`` to remove a runtime mask before attempting to restart
the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
restarting. This behavior is no longer the default.
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
]
call[name[_check_for_unit_changes], parameter[name[name]]]
call[name[_check_unmask], parameter[name[name], name[unmask], name[unmask_runtime]]]
variable[ret] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[call[name[_systemctl_cmd], parameter[constant[restart], name[name]]]]]
if compare[call[name[ret]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da204566650>
return[constant[True]]
|
keyword[def] identifier[restart] ( identifier[name] , identifier[no_block] = keyword[False] , identifier[unmask] = keyword[False] , identifier[unmask_runtime] = keyword[False] ):
literal[string]
identifier[_check_for_unit_changes] ( identifier[name] )
identifier[_check_unmask] ( identifier[name] , identifier[unmask] , identifier[unmask_runtime] )
identifier[ret] = identifier[__salt__] [ literal[string] ](
identifier[_systemctl_cmd] ( literal[string] , identifier[name] , identifier[systemd_scope] = keyword[True] , identifier[no_block] = identifier[no_block] ),
identifier[python_shell] = keyword[False] )
keyword[if] identifier[ret] [ literal[string] ]!= literal[int] :
keyword[raise] identifier[CommandExecutionError] ( identifier[_strip_scope] ( identifier[ret] [ literal[string] ]))
keyword[return] keyword[True]
|
def restart(name, no_block=False, unmask=False, unmask_runtime=False):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Restart the specified service with systemd
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: 2017.7.0
unmask : False
Set to ``True`` to remove an indefinite mask before attempting to
restart the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
restarting. This behavior is no longer the default.
unmask_runtime : False
Set to ``True`` to remove a runtime mask before attempting to restart
the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
restarting. This behavior is no longer the default.
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
"""
_check_for_unit_changes(name)
_check_unmask(name, unmask, unmask_runtime)
ret = __salt__['cmd.run_all'](_systemctl_cmd('restart', name, systemd_scope=True, no_block=no_block), python_shell=False)
if ret['retcode'] != 0:
# Instead of returning a bool, raise an exception so that we can
# include the error message in the return data. This helps give more
# information to the user in instances where the service is masked.
raise CommandExecutionError(_strip_scope(ret['stderr'])) # depends on [control=['if'], data=[]]
return True
|
def _stringify_path(path_or_buffer):
    '''Convert path like object to string
    Args:
        path_or_buffer: object to be converted
    Returns:
        string_path_or_buffer: maybe string version of path_or_buffer
    '''
    try:
        import pathlib
        pathlib_available = True
    except ImportError:
        pathlib_available = False
    # os.PathLike objects expose __fspath__; prefer that protocol when present.
    fspath = getattr(path_or_buffer, '__fspath__', None)
    if fspath is not None:
        return fspath()
    # Fallback for pathlib.Path on interpreters lacking the fspath protocol.
    if pathlib_available and isinstance(path_or_buffer, pathlib.Path):
        return text_type(path_or_buffer)
    return path_or_buffer
|
def function[_stringify_path, parameter[path_or_buffer]]:
constant[Convert path like object to string
Args:
path_or_buffer: object to be converted
Returns:
string_path_or_buffer: maybe string version of path_or_buffer
]
<ast.Try object at 0x7da18f09c2b0>
if call[name[hasattr], parameter[name[path_or_buffer], constant[__fspath__]]] begin[:]
return[call[name[path_or_buffer].__fspath__, parameter[]]]
if <ast.BoolOp object at 0x7da18f720d30> begin[:]
return[call[name[text_type], parameter[name[path_or_buffer]]]]
return[name[path_or_buffer]]
|
keyword[def] identifier[_stringify_path] ( identifier[path_or_buffer] ):
literal[string]
keyword[try] :
keyword[import] identifier[pathlib]
identifier[_PATHLIB_INSTALLED] = keyword[True]
keyword[except] identifier[ImportError] :
identifier[_PATHLIB_INSTALLED] = keyword[False]
keyword[if] identifier[hasattr] ( identifier[path_or_buffer] , literal[string] ):
keyword[return] identifier[path_or_buffer] . identifier[__fspath__] ()
keyword[if] identifier[_PATHLIB_INSTALLED] keyword[and] identifier[isinstance] ( identifier[path_or_buffer] , identifier[pathlib] . identifier[Path] ):
keyword[return] identifier[text_type] ( identifier[path_or_buffer] )
keyword[return] identifier[path_or_buffer]
|
def _stringify_path(path_or_buffer):
"""Convert path like object to string
Args:
path_or_buffer: object to be converted
Returns:
string_path_or_buffer: maybe string version of path_or_buffer
"""
try:
import pathlib
_PATHLIB_INSTALLED = True # depends on [control=['try'], data=[]]
except ImportError:
_PATHLIB_INSTALLED = False # depends on [control=['except'], data=[]]
if hasattr(path_or_buffer, '__fspath__'):
return path_or_buffer.__fspath__() # depends on [control=['if'], data=[]]
if _PATHLIB_INSTALLED and isinstance(path_or_buffer, pathlib.Path):
return text_type(path_or_buffer) # depends on [control=['if'], data=[]]
return path_or_buffer
|
def _bytes_to_values(self, bs, width=None):
    """Convert a packed row of bytes into a row of values.
    Result will be a freshly allocated object,
    not shared with the argument.
    """
    depth = self.bitdepth
    if depth == 8:
        # One sample per byte already; just copy into a fresh bytearray.
        return bytearray(bs)
    if depth == 16:
        # Big-endian unsigned shorts, two bytes per sample.
        fmt = '!%dH' % (len(bs) // 2)
        return array('H', struct.unpack(fmt, bs))
    assert depth < 8
    if width is None:
        width = self.width
    # Several samples are packed into each byte, most significant first.
    samples_per_byte = 8 // depth
    mask = (1 << depth) - 1
    shifts = [depth * i for i in range(samples_per_byte - 1, -1, -1)]
    out = bytearray()
    for byte in bs:
        for shift in shifts:
            out.append(mask & (byte >> shift))
    # Trailing padding bits in the last byte are trimmed to the row width.
    return out[:width]
|
def function[_bytes_to_values, parameter[self, bs, width]]:
constant[Convert a packed row of bytes into a row of values.
Result will be a freshly allocated object,
not shared with the argument.
]
if compare[name[self].bitdepth equal[==] constant[8]] begin[:]
return[call[name[bytearray], parameter[name[bs]]]]
if compare[name[self].bitdepth equal[==] constant[16]] begin[:]
return[call[name[array], parameter[constant[H], call[name[struct].unpack, parameter[binary_operation[constant[!%dH] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[len], parameter[name[bs]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]], name[bs]]]]]]
assert[compare[name[self].bitdepth less[<] constant[8]]]
if compare[name[width] is constant[None]] begin[:]
variable[width] assign[=] name[self].width
variable[spb] assign[=] binary_operation[constant[8] <ast.FloorDiv object at 0x7da2590d6bc0> name[self].bitdepth]
variable[out] assign[=] call[name[bytearray], parameter[]]
variable[mask] assign[=] binary_operation[binary_operation[constant[2] ** name[self].bitdepth] - constant[1]]
variable[shifts] assign[=] <ast.ListComp object at 0x7da1b06ffbe0>
for taget[name[o]] in starred[name[bs]] begin[:]
call[name[out].extend, parameter[<ast.ListComp object at 0x7da1b06ff5b0>]]
return[call[name[out]][<ast.Slice object at 0x7da1b06ff370>]]
|
keyword[def] identifier[_bytes_to_values] ( identifier[self] , identifier[bs] , identifier[width] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[bitdepth] == literal[int] :
keyword[return] identifier[bytearray] ( identifier[bs] )
keyword[if] identifier[self] . identifier[bitdepth] == literal[int] :
keyword[return] identifier[array] ( literal[string] ,
identifier[struct] . identifier[unpack] ( literal[string] %( identifier[len] ( identifier[bs] )// literal[int] ), identifier[bs] ))
keyword[assert] identifier[self] . identifier[bitdepth] < literal[int]
keyword[if] identifier[width] keyword[is] keyword[None] :
identifier[width] = identifier[self] . identifier[width]
identifier[spb] = literal[int] // identifier[self] . identifier[bitdepth]
identifier[out] = identifier[bytearray] ()
identifier[mask] = literal[int] ** identifier[self] . identifier[bitdepth] - literal[int]
identifier[shifts] =[ identifier[self] . identifier[bitdepth] * identifier[i]
keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[list] ( identifier[range] ( identifier[spb] )))]
keyword[for] identifier[o] keyword[in] identifier[bs] :
identifier[out] . identifier[extend] ([ identifier[mask] &( identifier[o] >> identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[shifts] ])
keyword[return] identifier[out] [: identifier[width] ]
|
def _bytes_to_values(self, bs, width=None):
"""Convert a packed row of bytes into a row of values.
Result will be a freshly allocated object,
not shared with the argument.
"""
if self.bitdepth == 8:
return bytearray(bs) # depends on [control=['if'], data=[]]
if self.bitdepth == 16:
return array('H', struct.unpack('!%dH' % (len(bs) // 2), bs)) # depends on [control=['if'], data=[]]
assert self.bitdepth < 8
if width is None:
width = self.width # depends on [control=['if'], data=['width']]
# Samples per byte
spb = 8 // self.bitdepth
out = bytearray()
mask = 2 ** self.bitdepth - 1
shifts = [self.bitdepth * i for i in reversed(list(range(spb)))]
for o in bs:
out.extend([mask & o >> i for i in shifts]) # depends on [control=['for'], data=['o']]
return out[:width]
|
def _digits(self):
    """ 0-9 """
    self.number += self.key
    # In non-compact mode selection numbers refer to the commented item list.
    source = self.items_com if self.compact is False else self.items
    try:
        position = max(int(self.number) - 1, 0)
        self.top.body.focus_position = self.items.index(source[position])
    except IndexError:
        # Typed number is out of range; drop the last digit.
        self.number = self.number[:-1]
    self.top.keypress(self.size, "")  # Trick urwid into redisplaying the cursor
    if self.number:
        self._footer_start_thread("Selection: {}".format(self.number), 1)
|
def function[_digits, parameter[self]]:
constant[ 0-9 ]
<ast.AugAssign object at 0x7da20c6c5300>
<ast.Try object at 0x7da20c6c6f20>
call[name[self].top.keypress, parameter[name[self].size, constant[]]]
if name[self].number begin[:]
call[name[self]._footer_start_thread, parameter[call[constant[Selection: {}].format, parameter[name[self].number]], constant[1]]]
|
keyword[def] identifier[_digits] ( identifier[self] ):
literal[string]
identifier[self] . identifier[number] += identifier[self] . identifier[key]
keyword[try] :
keyword[if] identifier[self] . identifier[compact] keyword[is] keyword[False] :
identifier[self] . identifier[top] . identifier[body] . identifier[focus_position] = identifier[self] . identifier[items] . identifier[index] ( identifier[self] . identifier[items_com] [ identifier[max] ( identifier[int] ( identifier[self] . identifier[number] )- literal[int] , literal[int] )])
keyword[else] :
identifier[self] . identifier[top] . identifier[body] . identifier[focus_position] = identifier[self] . identifier[items] . identifier[index] ( identifier[self] . identifier[items] [ identifier[max] ( identifier[int] ( identifier[self] . identifier[number] )- literal[int] , literal[int] )])
keyword[except] identifier[IndexError] :
identifier[self] . identifier[number] = identifier[self] . identifier[number] [:- literal[int] ]
identifier[self] . identifier[top] . identifier[keypress] ( identifier[self] . identifier[size] , literal[string] )
keyword[if] identifier[self] . identifier[number] :
identifier[self] . identifier[_footer_start_thread] ( literal[string] . identifier[format] ( identifier[self] . identifier[number] ), literal[int] )
|
def _digits(self):
""" 0-9 """
self.number += self.key
try:
if self.compact is False:
self.top.body.focus_position = self.items.index(self.items_com[max(int(self.number) - 1, 0)]) # depends on [control=['if'], data=[]]
else:
self.top.body.focus_position = self.items.index(self.items[max(int(self.number) - 1, 0)]) # depends on [control=['try'], data=[]]
except IndexError:
self.number = self.number[:-1] # depends on [control=['except'], data=[]]
self.top.keypress(self.size, '') # Trick urwid into redisplaying the cursor
if self.number:
self._footer_start_thread('Selection: {}'.format(self.number), 1) # depends on [control=['if'], data=[]]
|
def update_transfer_encoding(self) -> None:
    """Analyze transfer-encoding header."""
    encoding = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()
    if 'chunked' in encoding:
        # Header already requests chunking; the flag must not also be set.
        if self.chunked:
            raise ValueError(
                'chunked can not be set '
                'if "Transfer-Encoding: chunked" header is set')
    elif self.chunked:
        # Chunked transfer and an explicit length are mutually exclusive.
        if hdrs.CONTENT_LENGTH in self.headers:
            raise ValueError(
                'chunked can not be set '
                'if Content-Length header is set')
        self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'
    elif hdrs.CONTENT_LENGTH not in self.headers:
        self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
|
def function[update_transfer_encoding, parameter[self]]:
constant[Analyze transfer-encoding header.]
variable[te] assign[=] call[call[name[self].headers.get, parameter[name[hdrs].TRANSFER_ENCODING, constant[]]].lower, parameter[]]
if compare[constant[chunked] in name[te]] begin[:]
if name[self].chunked begin[:]
<ast.Raise object at 0x7da1b1f41270>
|
keyword[def] identifier[update_transfer_encoding] ( identifier[self] )-> keyword[None] :
literal[string]
identifier[te] = identifier[self] . identifier[headers] . identifier[get] ( identifier[hdrs] . identifier[TRANSFER_ENCODING] , literal[string] ). identifier[lower] ()
keyword[if] literal[string] keyword[in] identifier[te] :
keyword[if] identifier[self] . identifier[chunked] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] )
keyword[elif] identifier[self] . identifier[chunked] :
keyword[if] identifier[hdrs] . identifier[CONTENT_LENGTH] keyword[in] identifier[self] . identifier[headers] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] )
identifier[self] . identifier[headers] [ identifier[hdrs] . identifier[TRANSFER_ENCODING] ]= literal[string]
keyword[else] :
keyword[if] identifier[hdrs] . identifier[CONTENT_LENGTH] keyword[not] keyword[in] identifier[self] . identifier[headers] :
identifier[self] . identifier[headers] [ identifier[hdrs] . identifier[CONTENT_LENGTH] ]= identifier[str] ( identifier[len] ( identifier[self] . identifier[body] ))
|
def update_transfer_encoding(self) -> None:
"""Analyze transfer-encoding header."""
te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()
if 'chunked' in te:
if self.chunked:
raise ValueError('chunked can not be set if "Transfer-Encoding: chunked" header is set') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.chunked:
if hdrs.CONTENT_LENGTH in self.headers:
raise ValueError('chunked can not be set if Content-Length header is set') # depends on [control=['if'], data=[]]
self.headers[hdrs.TRANSFER_ENCODING] = 'chunked' # depends on [control=['if'], data=[]]
elif hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body)) # depends on [control=['if'], data=[]]
|
def variant_acmg(institute_id, case_name, variant_id):
    """ACMG classification form."""
    if request.method == 'GET':
        return controllers.variant_acmg(store, institute_id, case_name, variant_id)
    # POST: collect the selected ACMG criteria with their comments and links.
    criteria = [
        {
            'term': term,
            'comment': request.form.get("comment-{}".format(term)),
            'links': [request.form.get("link-{}".format(term))],
        }
        for term in request.form.getlist('criteria')
    ]
    classification = controllers.variant_acmg_post(
        store, institute_id, case_name, variant_id, current_user.email, criteria)
    flash("classified as: {}".format(classification), 'info')
    return redirect(url_for('.variant', institute_id=institute_id,
                            case_name=case_name, variant_id=variant_id))
|
def function[variant_acmg, parameter[institute_id, case_name, variant_id]]:
constant[ACMG classification form.]
if compare[name[request].method equal[==] constant[GET]] begin[:]
variable[data] assign[=] call[name[controllers].variant_acmg, parameter[name[store], name[institute_id], name[case_name], name[variant_id]]]
return[name[data]]
|
keyword[def] identifier[variant_acmg] ( identifier[institute_id] , identifier[case_name] , identifier[variant_id] ):
literal[string]
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[data] = identifier[controllers] . identifier[variant_acmg] ( identifier[store] , identifier[institute_id] , identifier[case_name] , identifier[variant_id] )
keyword[return] identifier[data]
keyword[else] :
identifier[criteria] =[]
identifier[criteria_terms] = identifier[request] . identifier[form] . identifier[getlist] ( literal[string] )
keyword[for] identifier[term] keyword[in] identifier[criteria_terms] :
identifier[criteria] . identifier[append] ( identifier[dict] (
identifier[term] = identifier[term] ,
identifier[comment] = identifier[request] . identifier[form] . identifier[get] ( literal[string] . identifier[format] ( identifier[term] )),
identifier[links] =[ identifier[request] . identifier[form] . identifier[get] ( literal[string] . identifier[format] ( identifier[term] ))],
))
identifier[acmg] = identifier[controllers] . identifier[variant_acmg_post] ( identifier[store] , identifier[institute_id] , identifier[case_name] , identifier[variant_id] ,
identifier[current_user] . identifier[email] , identifier[criteria] )
identifier[flash] ( literal[string] . identifier[format] ( identifier[acmg] ), literal[string] )
keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] , identifier[institute_id] = identifier[institute_id] , identifier[case_name] = identifier[case_name] ,
identifier[variant_id] = identifier[variant_id] ))
|
def variant_acmg(institute_id, case_name, variant_id):
"""ACMG classification form."""
if request.method == 'GET':
data = controllers.variant_acmg(store, institute_id, case_name, variant_id)
return data # depends on [control=['if'], data=[]]
else:
criteria = []
criteria_terms = request.form.getlist('criteria')
for term in criteria_terms:
criteria.append(dict(term=term, comment=request.form.get('comment-{}'.format(term)), links=[request.form.get('link-{}'.format(term))])) # depends on [control=['for'], data=['term']]
acmg = controllers.variant_acmg_post(store, institute_id, case_name, variant_id, current_user.email, criteria)
flash('classified as: {}'.format(acmg), 'info')
return redirect(url_for('.variant', institute_id=institute_id, case_name=case_name, variant_id=variant_id))
|
def asyncthread(func):
    'Function decorator, to make calls to `func()` spawn a separate thread if available.'
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Delegate execution to the VisiData singleton's async runner.
        return vd().execAsync(func, *args, **kwargs)
    return wrapper
|
def function[asyncthread, parameter[func]]:
constant[Function decorator, to make calls to `func()` spawn a separate thread if available.]
def function[_execAsync, parameter[]]:
return[call[call[name[vd], parameter[]].execAsync, parameter[name[func], <ast.Starred object at 0x7da18ede5750>]]]
return[name[_execAsync]]
|
keyword[def] identifier[asyncthread] ( identifier[func] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[_execAsync] (* identifier[args] ,** identifier[kwargs] ):
keyword[return] identifier[vd] (). identifier[execAsync] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[_execAsync]
|
def asyncthread(func):
"""Function decorator, to make calls to `func()` spawn a separate thread if available."""
@functools.wraps(func)
def _execAsync(*args, **kwargs):
return vd().execAsync(func, *args, **kwargs)
return _execAsync
|
def get_server_public(self, password_verifier, server_private):
    """B = (k*v + g^b) % N
    :param int password_verifier:
    :param int server_private:
    :rtype: int
    """
    kv = self._mult * password_verifier
    gb = pow(self._gen, server_private, self._prime)
    return (kv + gb) % self._prime
|
def function[get_server_public, parameter[self, password_verifier, server_private]]:
constant[B = (k*v + g^b) % N
:param int password_verifier:
:param int server_private:
:rtype: int
]
return[binary_operation[binary_operation[binary_operation[name[self]._mult * name[password_verifier]] + call[name[pow], parameter[name[self]._gen, name[server_private], name[self]._prime]]] <ast.Mod object at 0x7da2590d6920> name[self]._prime]]
|
keyword[def] identifier[get_server_public] ( identifier[self] , identifier[password_verifier] , identifier[server_private] ):
literal[string]
keyword[return] (( identifier[self] . identifier[_mult] * identifier[password_verifier] )+ identifier[pow] ( identifier[self] . identifier[_gen] , identifier[server_private] , identifier[self] . identifier[_prime] ))% identifier[self] . identifier[_prime]
|
def get_server_public(self, password_verifier, server_private):
"""B = (k*v + g^b) % N
:param int password_verifier:
:param int server_private:
:rtype: int
"""
return (self._mult * password_verifier + pow(self._gen, server_private, self._prime)) % self._prime
|
def get_source(label, source_type, **kwargs):
    """Get a config source based on type and keyword args.
    This is meant to be used internally by the spec via ``add_source``.
    Args:
        label (str): The label for this source.
        source_type: The type of source. See ``yapconf.SUPPORTED_SOURCES``
    Keyword Args:
        The keyword arguments are based on the source_type. Please see the
        documentation of the individual sources for a detailed list of all
        possible arguments.
    Returns (yapconf.sources.ConfigSource):
        A valid config source which can be used for generating an override.
    Raises:
        YapconfSourceError: If there is some kind of error with this source
        definition.
    """
    if source_type not in yapconf.ALL_SUPPORTED_SOURCES:
        raise YapconfSourceError(
            'Invalid source type %s. Supported types are %s.' %
            (source_type, yapconf.ALL_SUPPORTED_SOURCES)
        )
    if source_type not in yapconf.SUPPORTED_SOURCES:
        raise YapconfSourceError(
            'Unsupported source type "%s". If you want to use this type, you '
            'will need to install the correct client for it (try `pip install '
            'yapconf[%s]. Currently supported types are %s. All supported '
            'types are %s' %
            (source_type, source_type, yapconf.SUPPORTED_SOURCES,
             yapconf.ALL_SUPPORTED_SOURCES)
        )
    # Positional-style arguments are popped from kwargs (with a None default,
    # equivalent to get-then-pop) because the individual config sources have
    # better error messages if a keyword argument is missed.
    if source_type == 'dict':
        return DictConfigSource(label, data=kwargs.get('data'))
    elif source_type == 'json':
        return JsonConfigSource(label, **kwargs)
    elif source_type == 'yaml':
        filename = kwargs.pop('filename', None)
        return YamlConfigSource(label, filename, **kwargs)
    elif source_type == 'environment':
        return EnvironmentConfigSource(label)
    elif source_type == 'etcd':
        return EtcdConfigSource(
            label, kwargs.get('client'), kwargs.get('key', '/')
        )
    elif source_type == 'kubernetes':
        name = kwargs.pop('name', None)
        client = kwargs.pop('client', None)
        return KubernetesConfigSource(label, client, name, **kwargs)
    else:
        # Defensive: a type listed as supported but not yet handled above.
        raise NotImplementedError(
            'No implementation for source type %s' % source_type
        )
|
def function[get_source, parameter[label, source_type]]:
constant[Get a config source based on type and keyword args.
This is meant to be used internally by the spec via ``add_source``.
Args:
label (str): The label for this source.
source_type: The type of source. See ``yapconf.SUPPORTED_SOURCES``
Keyword Args:
The keyword arguments are based on the source_type. Please see the
documentation of the individual sources for a detailed list of all
possible arguments.
Returns (yapconf.sources.ConfigSource):
A valid config source which can be used for generating an override.
Raises:
YapconfSourceError: If there is some kind of error with this source
definition.
]
if compare[name[source_type] <ast.NotIn object at 0x7da2590d7190> name[yapconf].ALL_SUPPORTED_SOURCES] begin[:]
<ast.Raise object at 0x7da1b1dfc2e0>
if compare[name[source_type] <ast.NotIn object at 0x7da2590d7190> name[yapconf].SUPPORTED_SOURCES] begin[:]
<ast.Raise object at 0x7da1b1dfc580>
if compare[name[source_type] equal[==] constant[dict]] begin[:]
return[call[name[DictConfigSource], parameter[name[label]]]]
|
keyword[def] identifier[get_source] ( identifier[label] , identifier[source_type] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[source_type] keyword[not] keyword[in] identifier[yapconf] . identifier[ALL_SUPPORTED_SOURCES] :
keyword[raise] identifier[YapconfSourceError] (
literal[string] %
( identifier[source_type] , identifier[yapconf] . identifier[ALL_SUPPORTED_SOURCES] )
)
keyword[if] identifier[source_type] keyword[not] keyword[in] identifier[yapconf] . identifier[SUPPORTED_SOURCES] :
keyword[raise] identifier[YapconfSourceError] (
literal[string]
literal[string]
literal[string]
literal[string] %
( identifier[source_type] , identifier[source_type] , identifier[yapconf] . identifier[SUPPORTED_SOURCES] ,
identifier[yapconf] . identifier[ALL_SUPPORTED_SOURCES] )
)
keyword[if] identifier[source_type] == literal[string] :
keyword[return] identifier[DictConfigSource] ( identifier[label] , identifier[data] = identifier[kwargs] . identifier[get] ( literal[string] ))
keyword[elif] identifier[source_type] == literal[string] :
keyword[return] identifier[JsonConfigSource] ( identifier[label] ,** identifier[kwargs] )
keyword[elif] identifier[source_type] == literal[string] :
identifier[filename] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[return] identifier[YamlConfigSource] ( identifier[label] , identifier[filename] ,** identifier[kwargs] )
keyword[elif] identifier[source_type] == literal[string] :
keyword[return] identifier[EnvironmentConfigSource] ( identifier[label] )
keyword[elif] identifier[source_type] == literal[string] :
keyword[return] identifier[EtcdConfigSource] (
identifier[label] , identifier[kwargs] . identifier[get] ( literal[string] ), identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
)
keyword[elif] identifier[source_type] == literal[string] :
identifier[name] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[client] = identifier[kwargs] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[return] identifier[KubernetesConfigSource] ( identifier[label] , identifier[client] , identifier[name] ,** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] (
literal[string] % identifier[source_type]
)
|
def get_source(label, source_type, **kwargs):
"""Get a config source based on type and keyword args.
This is meant to be used internally by the spec via ``add_source``.
Args:
label (str): The label for this source.
source_type: The type of source. See ``yapconf.SUPPORTED_SOURCES``
Keyword Args:
The keyword arguments are based on the source_type. Please see the
documentation of the individual sources for a detailed list of all
possible arguments.
Returns (yapconf.sources.ConfigSource):
A valid config source which can be used for generating an override.
Raises:
YapconfSourceError: If there is some kind of error with this source
definition.
"""
if source_type not in yapconf.ALL_SUPPORTED_SOURCES:
raise YapconfSourceError('Invalid source type %s. Supported types are %s.' % (source_type, yapconf.ALL_SUPPORTED_SOURCES)) # depends on [control=['if'], data=['source_type']]
if source_type not in yapconf.SUPPORTED_SOURCES:
raise YapconfSourceError('Unsupported source type "%s". If you want to use this type, you will need to install the correct client for it (try `pip install yapconf[%s]. Currently supported types are %s. All supported types are %s' % (source_type, source_type, yapconf.SUPPORTED_SOURCES, yapconf.ALL_SUPPORTED_SOURCES)) # depends on [control=['if'], data=['source_type']]
# We pop arguments from kwargs because the individual config sources
# have better error messages if a keyword argument is missed.
if source_type == 'dict':
return DictConfigSource(label, data=kwargs.get('data')) # depends on [control=['if'], data=[]]
elif source_type == 'json':
return JsonConfigSource(label, **kwargs) # depends on [control=['if'], data=[]]
elif source_type == 'yaml':
filename = kwargs.get('filename')
if 'filename' in kwargs:
kwargs.pop('filename') # depends on [control=['if'], data=['kwargs']]
return YamlConfigSource(label, filename, **kwargs) # depends on [control=['if'], data=[]]
elif source_type == 'environment':
return EnvironmentConfigSource(label) # depends on [control=['if'], data=[]]
elif source_type == 'etcd':
return EtcdConfigSource(label, kwargs.get('client'), kwargs.get('key', '/')) # depends on [control=['if'], data=[]]
elif source_type == 'kubernetes':
name = kwargs.get('name')
if 'name' in kwargs:
kwargs.pop('name') # depends on [control=['if'], data=['kwargs']]
client = kwargs.get('client')
if 'client' in kwargs:
kwargs.pop('client') # depends on [control=['if'], data=['kwargs']]
return KubernetesConfigSource(label, client, name, **kwargs) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('No implementation for source type %s' % source_type)
|
def profile_delete(name, remote_addr=None,
cert=None, key=None, verify_cert=True):
''' Deletes a profile.
name :
The name of the profile to delete.
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_delete shared_mounts
'''
profile = profile_get(
name,
remote_addr,
cert,
key,
verify_cert,
_raw=True
)
profile.delete()
return True
|
def function[profile_delete, parameter[name, remote_addr, cert, key, verify_cert]]:
constant[ Deletes a profile.
name :
The name of the profile to delete.
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_delete shared_mounts
]
variable[profile] assign[=] call[name[profile_get], parameter[name[name], name[remote_addr], name[cert], name[key], name[verify_cert]]]
call[name[profile].delete, parameter[]]
return[constant[True]]
|
keyword[def] identifier[profile_delete] ( identifier[name] , identifier[remote_addr] = keyword[None] ,
identifier[cert] = keyword[None] , identifier[key] = keyword[None] , identifier[verify_cert] = keyword[True] ):
literal[string]
identifier[profile] = identifier[profile_get] (
identifier[name] ,
identifier[remote_addr] ,
identifier[cert] ,
identifier[key] ,
identifier[verify_cert] ,
identifier[_raw] = keyword[True]
)
identifier[profile] . identifier[delete] ()
keyword[return] keyword[True]
|
def profile_delete(name, remote_addr=None, cert=None, key=None, verify_cert=True):
""" Deletes a profile.
name :
The name of the profile to delete.
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_delete shared_mounts
"""
profile = profile_get(name, remote_addr, cert, key, verify_cert, _raw=True)
profile.delete()
return True
|
def send_cmd_recv_rsp(self, target, data, timeout):
"""Exchange data with a remote Target
Sends command *data* to the remote *target* discovered in the
most recent call to one of the sense_xxx() methods. Note that
*target* becomes invalid with any call to mute(), sense_xxx()
or listen_xxx()
Arguments:
target (nfc.clf.RemoteTarget): The target returned by the
last successful call of a sense_xxx() method.
data (bytearray): The binary data to send to the remote
device.
timeout (float): The maximum number of seconds to wait for
response data from the remote device.
Returns:
bytearray: Response data received from the remote device.
Raises:
nfc.clf.CommunicationError: When no data was received.
"""
fname = "send_cmd_recv_rsp"
cname = self.__class__.__module__ + '.' + self.__class__.__name__
raise NotImplementedError("%s.%s() is required" % (cname, fname))
|
def function[send_cmd_recv_rsp, parameter[self, target, data, timeout]]:
constant[Exchange data with a remote Target
Sends command *data* to the remote *target* discovered in the
most recent call to one of the sense_xxx() methods. Note that
*target* becomes invalid with any call to mute(), sense_xxx()
or listen_xxx()
Arguments:
target (nfc.clf.RemoteTarget): The target returned by the
last successful call of a sense_xxx() method.
data (bytearray): The binary data to send to the remote
device.
timeout (float): The maximum number of seconds to wait for
response data from the remote device.
Returns:
bytearray: Response data received from the remote device.
Raises:
nfc.clf.CommunicationError: When no data was received.
]
variable[fname] assign[=] constant[send_cmd_recv_rsp]
variable[cname] assign[=] binary_operation[binary_operation[name[self].__class__.__module__ + constant[.]] + name[self].__class__.__name__]
<ast.Raise object at 0x7da207f99b10>
|
keyword[def] identifier[send_cmd_recv_rsp] ( identifier[self] , identifier[target] , identifier[data] , identifier[timeout] ):
literal[string]
identifier[fname] = literal[string]
identifier[cname] = identifier[self] . identifier[__class__] . identifier[__module__] + literal[string] + identifier[self] . identifier[__class__] . identifier[__name__]
keyword[raise] identifier[NotImplementedError] ( literal[string] %( identifier[cname] , identifier[fname] ))
|
def send_cmd_recv_rsp(self, target, data, timeout):
"""Exchange data with a remote Target
Sends command *data* to the remote *target* discovered in the
most recent call to one of the sense_xxx() methods. Note that
*target* becomes invalid with any call to mute(), sense_xxx()
or listen_xxx()
Arguments:
target (nfc.clf.RemoteTarget): The target returned by the
last successful call of a sense_xxx() method.
data (bytearray): The binary data to send to the remote
device.
timeout (float): The maximum number of seconds to wait for
response data from the remote device.
Returns:
bytearray: Response data received from the remote device.
Raises:
nfc.clf.CommunicationError: When no data was received.
"""
fname = 'send_cmd_recv_rsp'
cname = self.__class__.__module__ + '.' + self.__class__.__name__
raise NotImplementedError('%s.%s() is required' % (cname, fname))
|
def on_each_(*validation_functions_collection):
"""
Generates a validation_function for collection inputs where each element of the input will be validated against the
corresponding validation_function(s) in the validation_functions_collection. Validators inside the tuple can be
provided as a list for convenience, this will be replaced with an 'and_' operator if the list has more than one
element.
Note that if you want to apply the SAME validation_functions to all elements in the input, you should rather use
on_all_.
:param validation_functions_collection: a sequence of (base validation function or list of base validation functions
to use).
A base validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or
a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list).
Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/)
expressions can be used instead of callables, they will be transformed to functions automatically.
:return:
"""
# create a tuple of validation functions.
validation_function_funcs = tuple(_process_validation_function_s(validation_func)
for validation_func in validation_functions_collection)
# generate a validation function based on the tuple of validation_functions lists
def on_each_val(x # type: Tuple
):
if len(validation_function_funcs) != len(x):
raise Failure('on_each_: x does not have the same number of elements than validation_functions_collection.')
else:
# apply each validation_function on the input with the same position in the collection
idx = -1
for elt, validation_function_func in zip(x, validation_function_funcs):
idx += 1
try:
res = validation_function_func(elt)
except Exception as e:
raise InvalidItemInSequence(wrong_value=elt,
wrapped_func=validation_function_func,
validation_outcome=e)
if not result_is_success(res):
# one validation_function was unhappy > raise
# raise Failure('on_each_(' + str(validation_functions_collection) + '): _validation_function [' + str(idx)
# + '] (' + str(validation_functions_collection[idx]) + ') failed validation for '
# 'input ' + str(x[idx]))
raise InvalidItemInSequence(wrong_value=elt,
wrapped_func=validation_function_func,
validation_outcome=res)
return True
on_each_val.__name__ = 'map_<{}>_on_elts' \
''.format('(' + ', '.join([get_callable_name(f) for f in validation_function_funcs]) + ')')
return on_each_val
|
def function[on_each_, parameter[]]:
constant[
Generates a validation_function for collection inputs where each element of the input will be validated against the
corresponding validation_function(s) in the validation_functions_collection. Validators inside the tuple can be
provided as a list for convenience, this will be replaced with an 'and_' operator if the list has more than one
element.
Note that if you want to apply the SAME validation_functions to all elements in the input, you should rather use
on_all_.
:param validation_functions_collection: a sequence of (base validation function or list of base validation functions
to use).
A base validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or
a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list).
Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/)
expressions can be used instead of callables, they will be transformed to functions automatically.
:return:
]
variable[validation_function_funcs] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b0d1b5e0>]]
def function[on_each_val, parameter[x]]:
if compare[call[name[len], parameter[name[validation_function_funcs]]] not_equal[!=] call[name[len], parameter[name[x]]]] begin[:]
<ast.Raise object at 0x7da1b0d1a5f0>
name[on_each_val].__name__ assign[=] call[constant[map_<{}>_on_elts].format, parameter[binary_operation[binary_operation[constant[(] + call[constant[, ].join, parameter[<ast.ListComp object at 0x7da1b0ff08b0>]]] + constant[)]]]]
return[name[on_each_val]]
|
keyword[def] identifier[on_each_] (* identifier[validation_functions_collection] ):
literal[string]
identifier[validation_function_funcs] = identifier[tuple] ( identifier[_process_validation_function_s] ( identifier[validation_func] )
keyword[for] identifier[validation_func] keyword[in] identifier[validation_functions_collection] )
keyword[def] identifier[on_each_val] ( identifier[x]
):
keyword[if] identifier[len] ( identifier[validation_function_funcs] )!= identifier[len] ( identifier[x] ):
keyword[raise] identifier[Failure] ( literal[string] )
keyword[else] :
identifier[idx] =- literal[int]
keyword[for] identifier[elt] , identifier[validation_function_func] keyword[in] identifier[zip] ( identifier[x] , identifier[validation_function_funcs] ):
identifier[idx] += literal[int]
keyword[try] :
identifier[res] = identifier[validation_function_func] ( identifier[elt] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[InvalidItemInSequence] ( identifier[wrong_value] = identifier[elt] ,
identifier[wrapped_func] = identifier[validation_function_func] ,
identifier[validation_outcome] = identifier[e] )
keyword[if] keyword[not] identifier[result_is_success] ( identifier[res] ):
keyword[raise] identifier[InvalidItemInSequence] ( identifier[wrong_value] = identifier[elt] ,
identifier[wrapped_func] = identifier[validation_function_func] ,
identifier[validation_outcome] = identifier[res] )
keyword[return] keyword[True]
identifier[on_each_val] . identifier[__name__] = literal[string] literal[string] . identifier[format] ( literal[string] + literal[string] . identifier[join] ([ identifier[get_callable_name] ( identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[validation_function_funcs] ])+ literal[string] )
keyword[return] identifier[on_each_val]
|
def on_each_(*validation_functions_collection):
"""
Generates a validation_function for collection inputs where each element of the input will be validated against the
corresponding validation_function(s) in the validation_functions_collection. Validators inside the tuple can be
provided as a list for convenience, this will be replaced with an 'and_' operator if the list has more than one
element.
Note that if you want to apply the SAME validation_functions to all elements in the input, you should rather use
on_all_.
:param validation_functions_collection: a sequence of (base validation function or list of base validation functions
to use).
A base validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or
a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list).
Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/)
expressions can be used instead of callables, they will be transformed to functions automatically.
:return:
"""
# create a tuple of validation functions.
validation_function_funcs = tuple((_process_validation_function_s(validation_func) for validation_func in validation_functions_collection))
# generate a validation function based on the tuple of validation_functions lists
def on_each_val(x): # type: Tuple
if len(validation_function_funcs) != len(x):
raise Failure('on_each_: x does not have the same number of elements than validation_functions_collection.') # depends on [control=['if'], data=[]]
else:
# apply each validation_function on the input with the same position in the collection
idx = -1
for (elt, validation_function_func) in zip(x, validation_function_funcs):
idx += 1
try:
res = validation_function_func(elt) # depends on [control=['try'], data=[]]
except Exception as e:
raise InvalidItemInSequence(wrong_value=elt, wrapped_func=validation_function_func, validation_outcome=e) # depends on [control=['except'], data=['e']]
if not result_is_success(res):
# one validation_function was unhappy > raise
# raise Failure('on_each_(' + str(validation_functions_collection) + '): _validation_function [' + str(idx)
# + '] (' + str(validation_functions_collection[idx]) + ') failed validation for '
# 'input ' + str(x[idx]))
raise InvalidItemInSequence(wrong_value=elt, wrapped_func=validation_function_func, validation_outcome=res) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return True
on_each_val.__name__ = 'map_<{}>_on_elts'.format('(' + ', '.join([get_callable_name(f) for f in validation_function_funcs]) + ')')
return on_each_val
|
def validate_sceneInfo(self):
"""Check scene name and whether remote file exists. Raises
WrongSceneNameError if the scene name is wrong.
"""
if self.sceneInfo.prefix not in self.__satellitesMap:
raise WrongSceneNameError('USGS Downloader: Prefix of %s (%s) is invalid'
% (self.sceneInfo.name, self.sceneInfo.prefix))
|
def function[validate_sceneInfo, parameter[self]]:
constant[Check scene name and whether remote file exists. Raises
WrongSceneNameError if the scene name is wrong.
]
if compare[name[self].sceneInfo.prefix <ast.NotIn object at 0x7da2590d7190> name[self].__satellitesMap] begin[:]
<ast.Raise object at 0x7da1b0242d70>
|
keyword[def] identifier[validate_sceneInfo] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[sceneInfo] . identifier[prefix] keyword[not] keyword[in] identifier[self] . identifier[__satellitesMap] :
keyword[raise] identifier[WrongSceneNameError] ( literal[string]
%( identifier[self] . identifier[sceneInfo] . identifier[name] , identifier[self] . identifier[sceneInfo] . identifier[prefix] ))
|
def validate_sceneInfo(self):
"""Check scene name and whether remote file exists. Raises
WrongSceneNameError if the scene name is wrong.
"""
if self.sceneInfo.prefix not in self.__satellitesMap:
raise WrongSceneNameError('USGS Downloader: Prefix of %s (%s) is invalid' % (self.sceneInfo.name, self.sceneInfo.prefix)) # depends on [control=['if'], data=[]]
|
def _get_options(**kwargs):
'''
Returns a list of options to be used in the yum/dnf command, based on the
kwargs passed.
'''
# Get repo options from the kwargs
fromrepo = kwargs.pop('fromrepo', '')
repo = kwargs.pop('repo', '')
disablerepo = kwargs.pop('disablerepo', '')
enablerepo = kwargs.pop('enablerepo', '')
disableexcludes = kwargs.pop('disableexcludes', '')
branch = kwargs.pop('branch', '')
setopt = kwargs.pop('setopt', None)
if setopt is None:
setopt = []
else:
setopt = salt.utils.args.split_input(setopt)
get_extra_options = kwargs.pop('get_extra_options', False)
# Support old 'repo' argument
if repo and not fromrepo:
fromrepo = repo
ret = []
if fromrepo:
log.info('Restricting to repo \'%s\'', fromrepo)
ret.extend(['--disablerepo=*', '--enablerepo={0}'.format(fromrepo)])
else:
if disablerepo:
targets = [disablerepo] \
if not isinstance(disablerepo, list) \
else disablerepo
log.info('Disabling repo(s): %s', ', '.join(targets))
ret.extend(
['--disablerepo={0}'.format(x) for x in targets]
)
if enablerepo:
targets = [enablerepo] \
if not isinstance(enablerepo, list) \
else enablerepo
log.info('Enabling repo(s): %s', ', '.join(targets))
ret.extend(['--enablerepo={0}'.format(x) for x in targets])
if disableexcludes:
log.info('Disabling excludes for \'%s\'', disableexcludes)
ret.append('--disableexcludes={0}'.format(disableexcludes))
if branch:
log.info('Adding branch \'%s\'', branch)
ret.append('--branch={0}'.format(branch))
for item in setopt:
ret.extend(['--setopt', six.text_type(item)])
if get_extra_options:
# sorting here to make order uniform, makes unit testing more reliable
for key in sorted(kwargs):
if key.startswith('__'):
continue
value = kwargs[key]
if isinstance(value, six.string_types):
log.info('Found extra option --%s=%s', key, value)
ret.append('--{0}={1}'.format(key, value))
elif value is True:
log.info('Found extra option --%s', key)
ret.append('--{0}'.format(key))
if ret:
log.info('Adding extra options: %s', ret)
return ret
|
def function[_get_options, parameter[]]:
constant[
Returns a list of options to be used in the yum/dnf command, based on the
kwargs passed.
]
variable[fromrepo] assign[=] call[name[kwargs].pop, parameter[constant[fromrepo], constant[]]]
variable[repo] assign[=] call[name[kwargs].pop, parameter[constant[repo], constant[]]]
variable[disablerepo] assign[=] call[name[kwargs].pop, parameter[constant[disablerepo], constant[]]]
variable[enablerepo] assign[=] call[name[kwargs].pop, parameter[constant[enablerepo], constant[]]]
variable[disableexcludes] assign[=] call[name[kwargs].pop, parameter[constant[disableexcludes], constant[]]]
variable[branch] assign[=] call[name[kwargs].pop, parameter[constant[branch], constant[]]]
variable[setopt] assign[=] call[name[kwargs].pop, parameter[constant[setopt], constant[None]]]
if compare[name[setopt] is constant[None]] begin[:]
variable[setopt] assign[=] list[[]]
variable[get_extra_options] assign[=] call[name[kwargs].pop, parameter[constant[get_extra_options], constant[False]]]
if <ast.BoolOp object at 0x7da18f58e170> begin[:]
variable[fromrepo] assign[=] name[repo]
variable[ret] assign[=] list[[]]
if name[fromrepo] begin[:]
call[name[log].info, parameter[constant[Restricting to repo '%s'], name[fromrepo]]]
call[name[ret].extend, parameter[list[[<ast.Constant object at 0x7da18f00f4c0>, <ast.Call object at 0x7da18f00f640>]]]]
if name[disableexcludes] begin[:]
call[name[log].info, parameter[constant[Disabling excludes for '%s'], name[disableexcludes]]]
call[name[ret].append, parameter[call[constant[--disableexcludes={0}].format, parameter[name[disableexcludes]]]]]
if name[branch] begin[:]
call[name[log].info, parameter[constant[Adding branch '%s'], name[branch]]]
call[name[ret].append, parameter[call[constant[--branch={0}].format, parameter[name[branch]]]]]
for taget[name[item]] in starred[name[setopt]] begin[:]
call[name[ret].extend, parameter[list[[<ast.Constant object at 0x7da18dc04a00>, <ast.Call object at 0x7da18dc054b0>]]]]
if name[get_extra_options] begin[:]
for taget[name[key]] in starred[call[name[sorted], parameter[name[kwargs]]]] begin[:]
if call[name[key].startswith, parameter[constant[__]]] begin[:]
continue
variable[value] assign[=] call[name[kwargs]][name[key]]
if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:]
call[name[log].info, parameter[constant[Found extra option --%s=%s], name[key], name[value]]]
call[name[ret].append, parameter[call[constant[--{0}={1}].format, parameter[name[key], name[value]]]]]
if name[ret] begin[:]
call[name[log].info, parameter[constant[Adding extra options: %s], name[ret]]]
return[name[ret]]
|
keyword[def] identifier[_get_options] (** identifier[kwargs] ):
literal[string]
identifier[fromrepo] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[repo] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[disablerepo] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[enablerepo] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[disableexcludes] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[branch] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[setopt] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[setopt] keyword[is] keyword[None] :
identifier[setopt] =[]
keyword[else] :
identifier[setopt] = identifier[salt] . identifier[utils] . identifier[args] . identifier[split_input] ( identifier[setopt] )
identifier[get_extra_options] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
keyword[if] identifier[repo] keyword[and] keyword[not] identifier[fromrepo] :
identifier[fromrepo] = identifier[repo]
identifier[ret] =[]
keyword[if] identifier[fromrepo] :
identifier[log] . identifier[info] ( literal[string] , identifier[fromrepo] )
identifier[ret] . identifier[extend] ([ literal[string] , literal[string] . identifier[format] ( identifier[fromrepo] )])
keyword[else] :
keyword[if] identifier[disablerepo] :
identifier[targets] =[ identifier[disablerepo] ] keyword[if] keyword[not] identifier[isinstance] ( identifier[disablerepo] , identifier[list] ) keyword[else] identifier[disablerepo]
identifier[log] . identifier[info] ( literal[string] , literal[string] . identifier[join] ( identifier[targets] ))
identifier[ret] . identifier[extend] (
[ literal[string] . identifier[format] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[targets] ]
)
keyword[if] identifier[enablerepo] :
identifier[targets] =[ identifier[enablerepo] ] keyword[if] keyword[not] identifier[isinstance] ( identifier[enablerepo] , identifier[list] ) keyword[else] identifier[enablerepo]
identifier[log] . identifier[info] ( literal[string] , literal[string] . identifier[join] ( identifier[targets] ))
identifier[ret] . identifier[extend] ([ literal[string] . identifier[format] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[targets] ])
keyword[if] identifier[disableexcludes] :
identifier[log] . identifier[info] ( literal[string] , identifier[disableexcludes] )
identifier[ret] . identifier[append] ( literal[string] . identifier[format] ( identifier[disableexcludes] ))
keyword[if] identifier[branch] :
identifier[log] . identifier[info] ( literal[string] , identifier[branch] )
identifier[ret] . identifier[append] ( literal[string] . identifier[format] ( identifier[branch] ))
keyword[for] identifier[item] keyword[in] identifier[setopt] :
identifier[ret] . identifier[extend] ([ literal[string] , identifier[six] . identifier[text_type] ( identifier[item] )])
keyword[if] identifier[get_extra_options] :
keyword[for] identifier[key] keyword[in] identifier[sorted] ( identifier[kwargs] ):
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[value] = identifier[kwargs] [ identifier[key] ]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
identifier[log] . identifier[info] ( literal[string] , identifier[key] , identifier[value] )
identifier[ret] . identifier[append] ( literal[string] . identifier[format] ( identifier[key] , identifier[value] ))
keyword[elif] identifier[value] keyword[is] keyword[True] :
identifier[log] . identifier[info] ( literal[string] , identifier[key] )
identifier[ret] . identifier[append] ( literal[string] . identifier[format] ( identifier[key] ))
keyword[if] identifier[ret] :
identifier[log] . identifier[info] ( literal[string] , identifier[ret] )
keyword[return] identifier[ret]
|
def _get_options(**kwargs):
"""
Returns a list of options to be used in the yum/dnf command, based on the
kwargs passed.
"""
# Get repo options from the kwargs
fromrepo = kwargs.pop('fromrepo', '')
repo = kwargs.pop('repo', '')
disablerepo = kwargs.pop('disablerepo', '')
enablerepo = kwargs.pop('enablerepo', '')
disableexcludes = kwargs.pop('disableexcludes', '')
branch = kwargs.pop('branch', '')
setopt = kwargs.pop('setopt', None)
if setopt is None:
setopt = [] # depends on [control=['if'], data=['setopt']]
else:
setopt = salt.utils.args.split_input(setopt)
get_extra_options = kwargs.pop('get_extra_options', False)
# Support old 'repo' argument
if repo and (not fromrepo):
fromrepo = repo # depends on [control=['if'], data=[]]
ret = []
if fromrepo:
log.info("Restricting to repo '%s'", fromrepo)
ret.extend(['--disablerepo=*', '--enablerepo={0}'.format(fromrepo)]) # depends on [control=['if'], data=[]]
else:
if disablerepo:
targets = [disablerepo] if not isinstance(disablerepo, list) else disablerepo
log.info('Disabling repo(s): %s', ', '.join(targets))
ret.extend(['--disablerepo={0}'.format(x) for x in targets]) # depends on [control=['if'], data=[]]
if enablerepo:
targets = [enablerepo] if not isinstance(enablerepo, list) else enablerepo
log.info('Enabling repo(s): %s', ', '.join(targets))
ret.extend(['--enablerepo={0}'.format(x) for x in targets]) # depends on [control=['if'], data=[]]
if disableexcludes:
log.info("Disabling excludes for '%s'", disableexcludes)
ret.append('--disableexcludes={0}'.format(disableexcludes)) # depends on [control=['if'], data=[]]
if branch:
log.info("Adding branch '%s'", branch)
ret.append('--branch={0}'.format(branch)) # depends on [control=['if'], data=[]]
for item in setopt:
ret.extend(['--setopt', six.text_type(item)]) # depends on [control=['for'], data=['item']]
if get_extra_options:
# sorting here to make order uniform, makes unit testing more reliable
for key in sorted(kwargs):
if key.startswith('__'):
continue # depends on [control=['if'], data=[]]
value = kwargs[key]
if isinstance(value, six.string_types):
log.info('Found extra option --%s=%s', key, value)
ret.append('--{0}={1}'.format(key, value)) # depends on [control=['if'], data=[]]
elif value is True:
log.info('Found extra option --%s', key)
ret.append('--{0}'.format(key)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
if ret:
log.info('Adding extra options: %s', ret) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return ret
|
def get_note(self, noteid, version=None):
""" Method to get a specific note
Arguments:
- noteid (string): ID of the note to get
- version (int): optional version of the note to get
Returns:
A tuple `(note, status)`
- note (dict): note object
- status (int): 0 on success and -1 otherwise
"""
# request note
params_version = ""
if version is not None:
params_version = '/v/' + str(version)
params = '/i/%s%s' % (str(noteid), params_version)
request = Request(DATA_URL+params)
request.add_header(self.header, self.get_token())
try:
response = urllib2.urlopen(request)
except HTTPError as e:
if e.code == 401:
raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
else:
return e, -1
except IOError as e:
return e, -1
note = json.loads(response.read().decode('utf-8'))
note = self.__add_simplenote_api_fields(note, noteid, int(response.info().get("X-Simperium-Version")))
# Sort tags
# For early versions of notes, tags not always available
if "tags" in note:
note["tags"] = sorted(note["tags"])
return note, 0
|
def function[get_note, parameter[self, noteid, version]]:
constant[ Method to get a specific note
Arguments:
- noteid (string): ID of the note to get
- version (int): optional version of the note to get
Returns:
A tuple `(note, status)`
- note (dict): note object
- status (int): 0 on success and -1 otherwise
]
variable[params_version] assign[=] constant[]
if compare[name[version] is_not constant[None]] begin[:]
variable[params_version] assign[=] binary_operation[constant[/v/] + call[name[str], parameter[name[version]]]]
variable[params] assign[=] binary_operation[constant[/i/%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f7203d0>, <ast.Name object at 0x7da18f720a00>]]]
variable[request] assign[=] call[name[Request], parameter[binary_operation[name[DATA_URL] + name[params]]]]
call[name[request].add_header, parameter[name[self].header, call[name[self].get_token, parameter[]]]]
<ast.Try object at 0x7da18f720790>
variable[note] assign[=] call[name[json].loads, parameter[call[call[name[response].read, parameter[]].decode, parameter[constant[utf-8]]]]]
variable[note] assign[=] call[name[self].__add_simplenote_api_fields, parameter[name[note], name[noteid], call[name[int], parameter[call[call[name[response].info, parameter[]].get, parameter[constant[X-Simperium-Version]]]]]]]
if compare[constant[tags] in name[note]] begin[:]
call[name[note]][constant[tags]] assign[=] call[name[sorted], parameter[call[name[note]][constant[tags]]]]
return[tuple[[<ast.Name object at 0x7da20c6ab850>, <ast.Constant object at 0x7da20c6a9c60>]]]
|
keyword[def] identifier[get_note] ( identifier[self] , identifier[noteid] , identifier[version] = keyword[None] ):
literal[string]
identifier[params_version] = literal[string]
keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] :
identifier[params_version] = literal[string] + identifier[str] ( identifier[version] )
identifier[params] = literal[string] %( identifier[str] ( identifier[noteid] ), identifier[params_version] )
identifier[request] = identifier[Request] ( identifier[DATA_URL] + identifier[params] )
identifier[request] . identifier[add_header] ( identifier[self] . identifier[header] , identifier[self] . identifier[get_token] ())
keyword[try] :
identifier[response] = identifier[urllib2] . identifier[urlopen] ( identifier[request] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[code] == literal[int] :
keyword[raise] identifier[SimplenoteLoginFailed] ( literal[string] )
keyword[else] :
keyword[return] identifier[e] ,- literal[int]
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[return] identifier[e] ,- literal[int]
identifier[note] = identifier[json] . identifier[loads] ( identifier[response] . identifier[read] (). identifier[decode] ( literal[string] ))
identifier[note] = identifier[self] . identifier[__add_simplenote_api_fields] ( identifier[note] , identifier[noteid] , identifier[int] ( identifier[response] . identifier[info] (). identifier[get] ( literal[string] )))
keyword[if] literal[string] keyword[in] identifier[note] :
identifier[note] [ literal[string] ]= identifier[sorted] ( identifier[note] [ literal[string] ])
keyword[return] identifier[note] , literal[int]
|
def get_note(self, noteid, version=None):
""" Method to get a specific note
Arguments:
- noteid (string): ID of the note to get
- version (int): optional version of the note to get
Returns:
A tuple `(note, status)`
- note (dict): note object
- status (int): 0 on success and -1 otherwise
"""
# request note
params_version = ''
if version is not None:
params_version = '/v/' + str(version) # depends on [control=['if'], data=['version']]
params = '/i/%s%s' % (str(noteid), params_version)
request = Request(DATA_URL + params)
request.add_header(self.header, self.get_token())
try:
response = urllib2.urlopen(request) # depends on [control=['try'], data=[]]
except HTTPError as e:
if e.code == 401:
raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.') # depends on [control=['if'], data=[]]
else:
return (e, -1) # depends on [control=['except'], data=['e']]
except IOError as e:
return (e, -1) # depends on [control=['except'], data=['e']]
note = json.loads(response.read().decode('utf-8'))
note = self.__add_simplenote_api_fields(note, noteid, int(response.info().get('X-Simperium-Version')))
# Sort tags
# For early versions of notes, tags not always available
if 'tags' in note:
note['tags'] = sorted(note['tags']) # depends on [control=['if'], data=['note']]
return (note, 0)
|
def upgrade():
    """Upgrade database: create the actions/system-roles table and indexes."""
    table = 'access_actionssystemroles'
    op.create_table(
        table,
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('action', sa.String(length=80), nullable=True),
        sa.Column('exclude', sa.Boolean(name='exclude'), server_default='0',
                  nullable=False),
        sa.Column('argument', sa.String(length=255), nullable=True),
        sa.Column('role_name', sa.String(length=40), nullable=False),
        sa.PrimaryKeyConstraint('id',
                                name=op.f('pk_access_actionssystemroles')),
        sa.UniqueConstraint('action', 'exclude', 'argument', 'role_name',
                            name='access_actionssystemroles_unique')
    )
    # One non-unique lookup index per filterable column, in the same order
    # as the original migration.
    for column in ('action', 'argument', 'role_name'):
        op.create_index(op.f('ix_{0}_{1}'.format(table, column)),
                        table, [column], unique=False)
|
def function[upgrade, parameter[]]:
constant[Upgrade database.]
call[name[op].create_table, parameter[constant[access_actionssystemroles], call[name[sa].Column, parameter[constant[id], call[name[sa].Integer, parameter[]]]], call[name[sa].Column, parameter[constant[action], call[name[sa].String, parameter[]]]], call[name[sa].Column, parameter[constant[exclude], call[name[sa].Boolean, parameter[]]]], call[name[sa].Column, parameter[constant[argument], call[name[sa].String, parameter[]]]], call[name[sa].Column, parameter[constant[role_name], call[name[sa].String, parameter[]]]], call[name[sa].PrimaryKeyConstraint, parameter[constant[id]]], call[name[sa].UniqueConstraint, parameter[constant[action], constant[exclude], constant[argument], constant[role_name]]]]]
call[name[op].create_index, parameter[call[name[op].f, parameter[constant[ix_access_actionssystemroles_action]]], constant[access_actionssystemroles], list[[<ast.Constant object at 0x7da204620910>]]]]
call[name[op].create_index, parameter[call[name[op].f, parameter[constant[ix_access_actionssystemroles_argument]]], constant[access_actionssystemroles], list[[<ast.Constant object at 0x7da18f720790>]]]]
call[name[op].create_index, parameter[call[name[op].f, parameter[constant[ix_access_actionssystemroles_role_name]]], constant[access_actionssystemroles], list[[<ast.Constant object at 0x7da18f723ac0>]]]]
|
keyword[def] identifier[upgrade] ():
literal[string]
identifier[op] . identifier[create_table] (
literal[string] ,
identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[Integer] (), identifier[nullable] = keyword[False] ),
identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[String] ( identifier[length] = literal[int] ), identifier[nullable] = keyword[True] ),
identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[Boolean] ( identifier[name] = literal[string] ), identifier[server_default] = literal[string] ,
identifier[nullable] = keyword[False] ),
identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[String] ( identifier[length] = literal[int] ), identifier[nullable] = keyword[True] ),
identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[String] ( identifier[length] = literal[int] ), identifier[nullable] = keyword[False] ),
identifier[sa] . identifier[PrimaryKeyConstraint] ( literal[string] ,
identifier[name] = identifier[op] . identifier[f] ( literal[string] )),
identifier[sa] . identifier[UniqueConstraint] ( literal[string] , literal[string] , literal[string] , literal[string] ,
identifier[name] = literal[string] )
)
identifier[op] . identifier[create_index] ( identifier[op] . identifier[f] ( literal[string] ),
literal[string] ,[ literal[string] ], identifier[unique] = keyword[False] )
identifier[op] . identifier[create_index] ( identifier[op] . identifier[f] ( literal[string] ),
literal[string] ,[ literal[string] ], identifier[unique] = keyword[False] )
identifier[op] . identifier[create_index] ( identifier[op] . identifier[f] ( literal[string] ),
literal[string] ,[ literal[string] ], identifier[unique] = keyword[False] )
|
def upgrade():
"""Upgrade database."""
op.create_table('access_actionssystemroles', sa.Column('id', sa.Integer(), nullable=False), sa.Column('action', sa.String(length=80), nullable=True), sa.Column('exclude', sa.Boolean(name='exclude'), server_default='0', nullable=False), sa.Column('argument', sa.String(length=255), nullable=True), sa.Column('role_name', sa.String(length=40), nullable=False), sa.PrimaryKeyConstraint('id', name=op.f('pk_access_actionssystemroles')), sa.UniqueConstraint('action', 'exclude', 'argument', 'role_name', name='access_actionssystemroles_unique'))
op.create_index(op.f('ix_access_actionssystemroles_action'), 'access_actionssystemroles', ['action'], unique=False)
op.create_index(op.f('ix_access_actionssystemroles_argument'), 'access_actionssystemroles', ['argument'], unique=False)
op.create_index(op.f('ix_access_actionssystemroles_role_name'), 'access_actionssystemroles', ['role_name'], unique=False)
|
def _generic_hook(self, name, **kwargs):
""" A generic hook that links the TemplateHelper with PluginManager """
entries = [entry for entry in self._plugin_manager.call_hook(name, **kwargs) if entry is not None]
return "\n".join(entries)
|
def function[_generic_hook, parameter[self, name]]:
constant[ A generic hook that links the TemplateHelper with PluginManager ]
variable[entries] assign[=] <ast.ListComp object at 0x7da18f09cc10>
return[call[constant[
].join, parameter[name[entries]]]]
|
keyword[def] identifier[_generic_hook] ( identifier[self] , identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[entries] =[ identifier[entry] keyword[for] identifier[entry] keyword[in] identifier[self] . identifier[_plugin_manager] . identifier[call_hook] ( identifier[name] ,** identifier[kwargs] ) keyword[if] identifier[entry] keyword[is] keyword[not] keyword[None] ]
keyword[return] literal[string] . identifier[join] ( identifier[entries] )
|
def _generic_hook(self, name, **kwargs):
""" A generic hook that links the TemplateHelper with PluginManager """
entries = [entry for entry in self._plugin_manager.call_hook(name, **kwargs) if entry is not None]
return '\n'.join(entries)
|
def specificity(self):
    r"""Return specificity.
    Specificity is defined as :math:`\frac{tn}{tn + fp}`
    AKA true negative rate (TNR)
    Cf. https://en.wikipedia.org/wiki/Specificity_(tests)
    Returns
    -------
    float
        The specificity of the confusion table
    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.specificity()
    0.75
    """
    denominator = self._tn + self._fp
    # An undefined ratio (no negatives at all) is reported as NaN.
    if denominator == 0:
        return float('NaN')
    return self._tn / denominator
|
def function[specificity, parameter[self]]:
constant[Return specificity.
Specificity is defined as :math:`\frac{tn}{tn + fp}`
AKA true negative rate (TNR)
Cf. https://en.wikipedia.org/wiki/Specificity_(tests)
Returns
-------
float
The specificity of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> ct.specificity()
0.75
]
if compare[binary_operation[name[self]._tn + name[self]._fp] equal[==] constant[0]] begin[:]
return[call[name[float], parameter[constant[NaN]]]]
return[binary_operation[name[self]._tn / binary_operation[name[self]._tn + name[self]._fp]]]
|
keyword[def] identifier[specificity] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_tn] + identifier[self] . identifier[_fp] == literal[int] :
keyword[return] identifier[float] ( literal[string] )
keyword[return] identifier[self] . identifier[_tn] /( identifier[self] . identifier[_tn] + identifier[self] . identifier[_fp] )
|
def specificity(self):
"""Return specificity.
Specificity is defined as :math:`\\frac{tn}{tn + fp}`
AKA true negative rate (TNR)
Cf. https://en.wikipedia.org/wiki/Specificity_(tests)
Returns
-------
float
The specificity of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> ct.specificity()
0.75
"""
if self._tn + self._fp == 0:
return float('NaN') # depends on [control=['if'], data=[]]
return self._tn / (self._tn + self._fp)
|
def add_config(parser):
    """ add config """
    # Default cluster-config path, resolved relative to the Heron home dir.
    default_config_path = config.get_heron_conf_dir()
    metavar_text = ('(a string; path to cluster config; default: "'
                    + default_config_path + '")')
    parser.add_argument(
        '--config-path',
        metavar=metavar_text,
        default=os.path.join(config.get_heron_dir(), default_config_path))
    return parser
|
def function[add_config, parameter[parser]]:
constant[ add config ]
variable[default_config_path] assign[=] call[name[config].get_heron_conf_dir, parameter[]]
call[name[parser].add_argument, parameter[constant[--config-path]]]
return[name[parser]]
|
keyword[def] identifier[add_config] ( identifier[parser] ):
literal[string]
identifier[default_config_path] = identifier[config] . identifier[get_heron_conf_dir] ()
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[metavar] = literal[string] + identifier[default_config_path] + literal[string] ,
identifier[default] = identifier[os] . identifier[path] . identifier[join] ( identifier[config] . identifier[get_heron_dir] (), identifier[default_config_path] ))
keyword[return] identifier[parser]
|
def add_config(parser):
""" add config """
# the default config path
default_config_path = config.get_heron_conf_dir()
parser.add_argument('--config-path', metavar='(a string; path to cluster config; default: "' + default_config_path + '")', default=os.path.join(config.get_heron_dir(), default_config_path))
return parser
|
def _get_mpr_view(self, connection, table):
    """ Finds and returns view name in the sqlite db represented by given connection.
    Args:
        connection: connection to sqlite db where to look for partition table.
        table (orm.Table):
    Raises:
        MissingViewError: if database does not have partition table.
    Returns:
        str: database table storing partition data.
    """
    logger.debug(
        'Looking for view of the table.\n table: {}'.format(table.vid))
    view_name = self.get_view_name(table)
    # Only return the name if the relation actually exists in the db.
    if self._relation_exists(connection, view_name):
        logger.debug(
            'View of the table exists.\n table: {}, view: {}'
            .format(table.vid, view_name))
        return view_name
    raise MissingViewError('sqlite database does not have view for {} table.'
                           .format(table.vid))
|
def function[_get_mpr_view, parameter[self, connection, table]]:
constant[ Finds and returns view name in the sqlite db represented by given connection.
Args:
connection: connection to sqlite db where to look for partition table.
table (orm.Table):
Raises:
MissingViewError: if database does not have partition table.
Returns:
str: database table storing partition data.
]
call[name[logger].debug, parameter[call[constant[Looking for view of the table.
table: {}].format, parameter[name[table].vid]]]]
variable[view] assign[=] call[name[self].get_view_name, parameter[name[table]]]
variable[view_exists] assign[=] call[name[self]._relation_exists, parameter[name[connection], name[view]]]
if name[view_exists] begin[:]
call[name[logger].debug, parameter[call[constant[View of the table exists.
table: {}, view: {}].format, parameter[name[table].vid, name[view]]]]]
return[name[view]]
<ast.Raise object at 0x7da20e9555a0>
|
keyword[def] identifier[_get_mpr_view] ( identifier[self] , identifier[connection] , identifier[table] ):
literal[string]
identifier[logger] . identifier[debug] (
literal[string] . identifier[format] ( identifier[table] . identifier[vid] ))
identifier[view] = identifier[self] . identifier[get_view_name] ( identifier[table] )
identifier[view_exists] = identifier[self] . identifier[_relation_exists] ( identifier[connection] , identifier[view] )
keyword[if] identifier[view_exists] :
identifier[logger] . identifier[debug] (
literal[string]
. identifier[format] ( identifier[table] . identifier[vid] , identifier[view] ))
keyword[return] identifier[view]
keyword[raise] identifier[MissingViewError] ( literal[string]
. identifier[format] ( identifier[table] . identifier[vid] ))
|
def _get_mpr_view(self, connection, table):
""" Finds and returns view name in the sqlite db represented by given connection.
Args:
connection: connection to sqlite db where to look for partition table.
table (orm.Table):
Raises:
MissingViewError: if database does not have partition table.
Returns:
str: database table storing partition data.
"""
logger.debug('Looking for view of the table.\n table: {}'.format(table.vid))
view = self.get_view_name(table)
view_exists = self._relation_exists(connection, view)
if view_exists:
logger.debug('View of the table exists.\n table: {}, view: {}'.format(table.vid, view))
return view # depends on [control=['if'], data=[]]
raise MissingViewError('sqlite database does not have view for {} table.'.format(table.vid))
|
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
    """Generate a dictionary of snap install information from origin
    @param snaps: List of snaps
    @param src: String of openstack-origin or source of the form
        snap:track/channel
    @param mode: String classic, devmode or jailmode
    @returns: Dictionary of snaps with channels and modes
    """
    prefix = 'snap:'
    if not src.startswith(prefix):
        juju_log("Snap source is not a snap origin", 'WARN')
        return {}
    # Everything after "snap:" is the track/channel specifier.
    channel = '--channel={}'.format(src[len(prefix):])
    return dict((snap, {'channel': channel, 'mode': mode}) for snap in snaps)
|
def function[get_snaps_install_info_from_origin, parameter[snaps, src, mode]]:
constant[Generate a dictionary of snap install information from origin
@param snaps: List of snaps
@param src: String of openstack-origin or source of the form
snap:track/channel
@param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes
]
if <ast.UnaryOp object at 0x7da1b1219cf0> begin[:]
call[name[juju_log], parameter[constant[Snap source is not a snap origin], constant[WARN]]]
return[dictionary[[], []]]
variable[_src] assign[=] call[name[src]][<ast.Slice object at 0x7da1b121ba60>]
variable[channel] assign[=] call[constant[--channel={}].format, parameter[name[_src]]]
return[<ast.DictComp object at 0x7da1b121b730>]
|
keyword[def] identifier[get_snaps_install_info_from_origin] ( identifier[snaps] , identifier[src] , identifier[mode] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[src] . identifier[startswith] ( literal[string] ):
identifier[juju_log] ( literal[string] , literal[string] )
keyword[return] {}
identifier[_src] = identifier[src] [ literal[int] :]
identifier[channel] = literal[string] . identifier[format] ( identifier[_src] )
keyword[return] { identifier[snap] :{ literal[string] : identifier[channel] , literal[string] : identifier[mode] }
keyword[for] identifier[snap] keyword[in] identifier[snaps] }
|
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
"""Generate a dictionary of snap install information from origin
@param snaps: List of snaps
@param src: String of openstack-origin or source of the form
snap:track/channel
@param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes
"""
if not src.startswith('snap:'):
juju_log('Snap source is not a snap origin', 'WARN')
return {} # depends on [control=['if'], data=[]]
_src = src[5:]
channel = '--channel={}'.format(_src)
return {snap: {'channel': channel, 'mode': mode} for snap in snaps}
|
def app_show(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/apps#get-information-about-app"
    # Interpolate the app id into the endpoint and delegate to the shared
    # request helper.
    return self.call("/api/v2/apps/{id}.json".format(id=id), **kwargs)
|
def function[app_show, parameter[self, id]]:
constant[https://developer.zendesk.com/rest_api/docs/core/apps#get-information-about-app]
variable[api_path] assign[=] constant[/api/v2/apps/{id}.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]]
|
keyword[def] identifier[app_show] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[id] = identifier[id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] ,** identifier[kwargs] )
|
def app_show(self, id, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/core/apps#get-information-about-app"""
api_path = '/api/v2/apps/{id}.json'
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs)
|
def waitUntilFinished(self):
    """
    Processes the main thread until the loading process has finished. This
    is a way to force the main thread to be synchronous in its execution.
    """
    # Pump the event loop at least once, then keep pumping while loading.
    process_events = QtCore.QCoreApplication.processEvents
    process_events()
    while self.isLoading():
        process_events()
|
def function[waitUntilFinished, parameter[self]]:
constant[
Processes the main thread until the loading process has finished. This
is a way to force the main thread to be synchronous in its execution.
]
call[name[QtCore].QCoreApplication.processEvents, parameter[]]
while call[name[self].isLoading, parameter[]] begin[:]
call[name[QtCore].QCoreApplication.processEvents, parameter[]]
|
keyword[def] identifier[waitUntilFinished] ( identifier[self] ):
literal[string]
identifier[QtCore] . identifier[QCoreApplication] . identifier[processEvents] ()
keyword[while] identifier[self] . identifier[isLoading] ():
identifier[QtCore] . identifier[QCoreApplication] . identifier[processEvents] ()
|
def waitUntilFinished(self):
"""
Processes the main thread until the loading process has finished. This
is a way to force the main thread to be synchronous in its execution.
"""
QtCore.QCoreApplication.processEvents()
while self.isLoading():
QtCore.QCoreApplication.processEvents() # depends on [control=['while'], data=[]]
|
def libpath(self):
    """Returns the full path to the shared *wrapper* library created for the
    module.
    """
    # Late import keeps the property cheap when never accessed.
    from os import path as _path
    return _path.join(self.dirpath, self.libname)
|
def function[libpath, parameter[self]]:
constant[Returns the full path to the shared *wrapper* library created for the
module.
]
from relative_module[os] import module[path]
return[call[name[path].join, parameter[name[self].dirpath, name[self].libname]]]
|
keyword[def] identifier[libpath] ( identifier[self] ):
literal[string]
keyword[from] identifier[os] keyword[import] identifier[path]
keyword[return] identifier[path] . identifier[join] ( identifier[self] . identifier[dirpath] , identifier[self] . identifier[libname] )
|
def libpath(self):
"""Returns the full path to the shared *wrapper* library created for the
module.
"""
from os import path
return path.join(self.dirpath, self.libname)
|
def apply(self, *args, **kwargs):
    """Applies function along input axis of DataFrame.
    Wrapper around the :meth:`pandas.DataFrame.apply` method.
    """
    # Delegate to the wrapped frame, then rebuild an instance of the same
    # concrete class so the metadata travels with the result.
    transformed = self._frame.apply(*args, **kwargs)
    return self.__class__(transformed,
                          metadata=self.metadata,
                          _metadata=self._metadata)
|
def function[apply, parameter[self]]:
constant[Applies function along input axis of DataFrame.
Wrapper around the :meth:`pandas.DataFrame.apply` method.
]
return[call[name[self].__class__, parameter[call[name[self]._frame.apply, parameter[<ast.Starred object at 0x7da1b26f2b60>]]]]]
|
keyword[def] identifier[apply] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[__class__] ( identifier[self] . identifier[_frame] . identifier[apply] (* identifier[args] ,** identifier[kwargs] ),
identifier[metadata] = identifier[self] . identifier[metadata] ,
identifier[_metadata] = identifier[self] . identifier[_metadata] )
|
def apply(self, *args, **kwargs):
"""Applies function along input axis of DataFrame.
Wrapper around the :meth:`pandas.DataFrame.apply` method.
"""
return self.__class__(self._frame.apply(*args, **kwargs), metadata=self.metadata, _metadata=self._metadata)
|
def selection(self):
    """
    Selection property.
    :return: None if no font is selected and font family name if one is selected.
    :rtype: None or str
    """
    selected = self.listbox.curselection()
    # Bug fix: the original compared ``len(selection) is 0`` -- an identity
    # check against an int literal that only works because CPython caches
    # small integers. Use truthiness instead; an empty tuple means nothing
    # is selected. Also reuse the first curselection() result rather than
    # querying the widget twice (the selection could change in between).
    if not selected:
        return None
    return self.font_indexes[selected[0]]
|
def function[selection, parameter[self]]:
constant[
Selection property.
:return: None if no font is selected and font family name if one is selected.
:rtype: None or str
]
variable[selection] assign[=] call[name[self].listbox.curselection, parameter[]]
if compare[call[name[len], parameter[name[selection]]] is constant[0]] begin[:]
return[constant[None]]
return[call[name[self].font_indexes][call[call[name[self].listbox.curselection, parameter[]]][constant[0]]]]
|
keyword[def] identifier[selection] ( identifier[self] ):
literal[string]
identifier[selection] = identifier[self] . identifier[listbox] . identifier[curselection] ()
keyword[if] identifier[len] ( identifier[selection] ) keyword[is] literal[int] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[font_indexes] [ identifier[self] . identifier[listbox] . identifier[curselection] ()[ literal[int] ]]
|
def selection(self):
"""
Selection property.
:return: None if no font is selected and font family name if one is selected.
:rtype: None or str
"""
selection = self.listbox.curselection()
if len(selection) is 0:
return None # depends on [control=['if'], data=[]]
return self.font_indexes[self.listbox.curselection()[0]]
|
def _get_json(self, url):
    """ Get json from url
    """
    self.log.info(u"/GET " + url)
    response = requests.get(url)
    # requests-cache responses carry a ``from_cache`` flag; note cache hits.
    if getattr(response, 'from_cache', False):
        self.log.info("(from cache)")
    if response.status_code != 200:
        throw_request_err(response)
    return response.json()
|
def function[_get_json, parameter[self, url]]:
constant[ Get json from url
]
call[name[self].log.info, parameter[binary_operation[constant[/GET ] + name[url]]]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
if call[name[hasattr], parameter[name[r], constant[from_cache]]] begin[:]
if name[r].from_cache begin[:]
call[name[self].log.info, parameter[constant[(from cache)]]]
if compare[name[r].status_code not_equal[!=] constant[200]] begin[:]
call[name[throw_request_err], parameter[name[r]]]
return[call[name[r].json, parameter[]]]
|
keyword[def] identifier[_get_json] ( identifier[self] , identifier[url] ):
literal[string]
identifier[self] . identifier[log] . identifier[info] ( literal[string] + identifier[url] )
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[if] identifier[hasattr] ( identifier[r] , literal[string] ):
keyword[if] identifier[r] . identifier[from_cache] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[if] identifier[r] . identifier[status_code] != literal[int] :
identifier[throw_request_err] ( identifier[r] )
keyword[return] identifier[r] . identifier[json] ()
|
def _get_json(self, url):
""" Get json from url
"""
self.log.info(u'/GET ' + url)
r = requests.get(url)
if hasattr(r, 'from_cache'):
if r.from_cache:
self.log.info('(from cache)') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if r.status_code != 200:
throw_request_err(r) # depends on [control=['if'], data=[]]
return r.json()
|
def release_filename(self, id_):
    """Release a file name.
    """
    entry = self.__entries.get(id_)
    if entry is None:
        raise ValueError("Invalid filename id (%d)" % id_)
    # Drop the entry (and its reverse-lookup record) once the last
    # reference is gone.
    if entry.dec_ref_count() == 0:
        self.__entries.pop(id_)
        self.__id_lut.pop(entry.filename)
|
def function[release_filename, parameter[self, id_]]:
constant[Release a file name.
]
variable[entry] assign[=] call[name[self].__entries.get, parameter[name[id_]]]
if compare[name[entry] is constant[None]] begin[:]
<ast.Raise object at 0x7da2054a4a90>
if compare[call[name[entry].dec_ref_count, parameter[]] equal[==] constant[0]] begin[:]
<ast.Delete object at 0x7da1b1305330>
<ast.Delete object at 0x7da1b1306e60>
|
keyword[def] identifier[release_filename] ( identifier[self] , identifier[id_] ):
literal[string]
identifier[entry] = identifier[self] . identifier[__entries] . identifier[get] ( identifier[id_] )
keyword[if] identifier[entry] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[id_] )
keyword[if] identifier[entry] . identifier[dec_ref_count] ()== literal[int] :
keyword[del] identifier[self] . identifier[__entries] [ identifier[id_] ]
keyword[del] identifier[self] . identifier[__id_lut] [ identifier[entry] . identifier[filename] ]
|
def release_filename(self, id_):
"""Release a file name.
"""
entry = self.__entries.get(id_)
if entry is None:
raise ValueError('Invalid filename id (%d)' % id_) # depends on [control=['if'], data=[]]
# Decrease reference count and check if the entry has to be removed...
if entry.dec_ref_count() == 0:
del self.__entries[id_]
del self.__id_lut[entry.filename] # depends on [control=['if'], data=[]]
|
async def get_alarms(self):
    """Get alarms for a Netdata instance.
    Fetches ``self.base_url + self.endpoint`` over the shared aiohttp
    session and stores the raw response body in ``self.alarms``.
    Raises:
        exceptions.NetdataConnectionError: on timeout, client error, or
            DNS failure.
    """
    url = '{}{}'.format(self.base_url, self.endpoint)
    try:
        # Both the request and the body read must finish within 5 seconds.
        with async_timeout.timeout(5, loop=self._loop):
            response = await self._session.get(url)
            _LOGGER.debug(
                "Response from Netdata: %s", response.status)
            data = await response.text()
            _LOGGER.debug(data)
            # NOTE(review): data is stored as raw text, not parsed JSON --
            # presumably the caller decodes it; confirm.
            self.alarms = data
    except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror):
        # Collapse all transport-level failures into one library error.
        _LOGGER.error("Can not load data from Netdata")
        raise exceptions.NetdataConnectionError()
|
<ast.AsyncFunctionDef object at 0x7da1b19d8490>
|
keyword[async] keyword[def] identifier[get_alarms] ( identifier[self] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[base_url] , identifier[self] . identifier[endpoint] )
keyword[try] :
keyword[with] identifier[async_timeout] . identifier[timeout] ( literal[int] , identifier[loop] = identifier[self] . identifier[_loop] ):
identifier[response] = keyword[await] identifier[self] . identifier[_session] . identifier[get] ( identifier[url] )
identifier[_LOGGER] . identifier[debug] (
literal[string] , identifier[response] . identifier[status] )
identifier[data] = keyword[await] identifier[response] . identifier[text] ()
identifier[_LOGGER] . identifier[debug] ( identifier[data] )
identifier[self] . identifier[alarms] = identifier[data]
keyword[except] ( identifier[asyncio] . identifier[TimeoutError] , identifier[aiohttp] . identifier[ClientError] , identifier[socket] . identifier[gaierror] ):
identifier[_LOGGER] . identifier[error] ( literal[string] )
keyword[raise] identifier[exceptions] . identifier[NetdataConnectionError] ()
|
async def get_alarms(self):
"""Get alarms for a Netdata instance."""
url = '{}{}'.format(self.base_url, self.endpoint)
try:
with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(url) # depends on [control=['with'], data=[]]
_LOGGER.debug('Response from Netdata: %s', response.status)
data = await response.text()
_LOGGER.debug(data)
self.alarms = data # depends on [control=['try'], data=[]]
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror):
_LOGGER.error('Can not load data from Netdata')
raise exceptions.NetdataConnectionError() # depends on [control=['except'], data=[]]
|
def invoke(self):
    """
    Invoke this handler.
    """
    # Run the action with its resolved arguments, then fire every
    # registered post-invocation callback in order.
    self._action(*self._get_args())
    for post_hook in self._post_callbacks:
        post_hook()
|
def function[invoke, parameter[self]]:
constant[
Invoke this handler.
]
variable[args] assign[=] call[name[self]._get_args, parameter[]]
call[name[self]._action, parameter[<ast.Starred object at 0x7da1b1a8e080>]]
for taget[name[callback]] in starred[name[self]._post_callbacks] begin[:]
call[name[callback], parameter[]]
|
keyword[def] identifier[invoke] ( identifier[self] ):
literal[string]
identifier[args] = identifier[self] . identifier[_get_args] ()
identifier[self] . identifier[_action] (* identifier[args] )
keyword[for] identifier[callback] keyword[in] identifier[self] . identifier[_post_callbacks] :
identifier[callback] ()
|
def invoke(self):
"""
Invoke this handler.
"""
args = self._get_args()
self._action(*args)
for callback in self._post_callbacks:
callback() # depends on [control=['for'], data=['callback']]
|
def connect_model(self, model):
    """Link the Database to the Model instance.

    In case a new database is created from scratch, ``connect_model``
    creates Trace objects for all tallyable pymc objects defined in
    `model`.
    If the database is being loaded from an existing file,
    ``connect_model`` restores the objects' traces to their stored
    values.

    :Parameters:
    model : pymc.Model instance
      An instance holding the pymc objects defining a statistical
      model (stochastics, deterministics, data, ...)

    :Raises:
    AttributeError
      If `model` is not a ``pymc.Model`` instance.
    """
    # Changed this to allow non-Model models. -AP
    # We could also remove it altogether. -DH
    if isinstance(model, pymc.Model):
        self.model = model
    else:
        raise AttributeError('Not a Model instance.')
    # Restore the state of the Model from an existing Database.
    # The `load` method will have already created the Trace objects.
    if hasattr(self, '_state_'):
        # Collect every trace name known to this Database; each
        # tallyable model object found below is re-attached to its
        # stored trace and removed from this set.
        names = set()
        for morenames in self.trace_names:
            names.update(morenames)
        for name, fun in six.iteritems(model._funs_to_tally):
            if name in self._traces:
                self._traces[name]._getfunc = fun
            names.discard(name)
        # NOTE(review): any names left over here are database traces with
        # no matching getfunc in the model; the reporting below is
        # commented out, so they are currently ignored silently.
        # if len(names) > 0:
        # print_("Some objects from the database have not been assigned a
        # getfunc", names)
    # Create a fresh new state.
    # We will be able to remove this when we deprecate traces on objects.
    else:
        for name, fun in six.iteritems(model._funs_to_tally):
            if name not in self._traces:
                self._traces[
                    name] = self.__Trace__(
                    name=name,
                    getfunc=fun,
                    db=self)
|
def function[connect_model, parameter[self, model]]:
constant[Link the Database to the Model instance.
In case a new database is created from scratch, ``connect_model``
creates Trace objects for all tallyable pymc objects defined in
`model`.
If the database is being loaded from an existing file, ``connect_model``
restore the objects trace to their stored value.
:Parameters:
model : pymc.Model instance
An instance holding the pymc objects defining a statistical
model (stochastics, deterministics, data, ...)
]
if call[name[isinstance], parameter[name[model], name[pymc].Model]] begin[:]
name[self].model assign[=] name[model]
if call[name[hasattr], parameter[name[self], constant[_state_]]] begin[:]
variable[names] assign[=] call[name[set], parameter[]]
for taget[name[morenames]] in starred[name[self].trace_names] begin[:]
call[name[names].update, parameter[name[morenames]]]
for taget[tuple[[<ast.Name object at 0x7da18c4ce2f0>, <ast.Name object at 0x7da18c4cfca0>]]] in starred[call[name[six].iteritems, parameter[name[model]._funs_to_tally]]] begin[:]
if compare[name[name] in name[self]._traces] begin[:]
call[name[self]._traces][name[name]]._getfunc assign[=] name[fun]
call[name[names].discard, parameter[name[name]]]
|
keyword[def] identifier[connect_model] ( identifier[self] , identifier[model] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[model] , identifier[pymc] . identifier[Model] ):
identifier[self] . identifier[model] = identifier[model]
keyword[else] :
keyword[raise] identifier[AttributeError] ( literal[string] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[names] = identifier[set] ()
keyword[for] identifier[morenames] keyword[in] identifier[self] . identifier[trace_names] :
identifier[names] . identifier[update] ( identifier[morenames] )
keyword[for] identifier[name] , identifier[fun] keyword[in] identifier[six] . identifier[iteritems] ( identifier[model] . identifier[_funs_to_tally] ):
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[_traces] :
identifier[self] . identifier[_traces] [ identifier[name] ]. identifier[_getfunc] = identifier[fun]
identifier[names] . identifier[discard] ( identifier[name] )
keyword[else] :
keyword[for] identifier[name] , identifier[fun] keyword[in] identifier[six] . identifier[iteritems] ( identifier[model] . identifier[_funs_to_tally] ):
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_traces] :
identifier[self] . identifier[_traces] [
identifier[name] ]= identifier[self] . identifier[__Trace__] (
identifier[name] = identifier[name] ,
identifier[getfunc] = identifier[fun] ,
identifier[db] = identifier[self] )
|
def connect_model(self, model):
"""Link the Database to the Model instance.
In case a new database is created from scratch, ``connect_model``
creates Trace objects for all tallyable pymc objects defined in
`model`.
If the database is being loaded from an existing file, ``connect_model``
restore the objects trace to their stored value.
:Parameters:
model : pymc.Model instance
An instance holding the pymc objects defining a statistical
model (stochastics, deterministics, data, ...)
"""
# Changed this to allow non-Model models. -AP
# We could also remove it altogether. -DH
if isinstance(model, pymc.Model):
self.model = model # depends on [control=['if'], data=[]]
else:
raise AttributeError('Not a Model instance.')
# Restore the state of the Model from an existing Database.
# The `load` method will have already created the Trace objects.
if hasattr(self, '_state_'):
names = set()
for morenames in self.trace_names:
names.update(morenames) # depends on [control=['for'], data=['morenames']]
for (name, fun) in six.iteritems(model._funs_to_tally):
if name in self._traces:
self._traces[name]._getfunc = fun
names.discard(name) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
# if len(names) > 0:
# print_("Some objects from the database have not been assigned a
# getfunc", names)
# Create a fresh new state.
# We will be able to remove this when we deprecate traces on objects.
for (name, fun) in six.iteritems(model._funs_to_tally):
if name not in self._traces:
self._traces[name] = self.__Trace__(name=name, getfunc=fun, db=self) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=[]]
|
def replySocket(self, port, host='*'):
    """
    Create a REP-style socket for servers.

    Parameters
    ----------
    port : int
        TCP port to bind the socket to.
    host : str
        Host/interface to bind; '*' (the default) binds all interfaces.

    Returns
    -------
    A zmq REP socket bound to ``tcp://host:port``.

    Raises
    ------
    The original exception type, re-raised with host/port context added
    to the message and the original exception chained as ``__cause__``.
    """
    try:
        # Avoid naming the local `socket`: that shadows the stdlib module.
        sock = self._context.socket(zmq.REP)
        sock.bind(self.tcpAddress(host, port))
    except Exception as e:
        # Preserve the exception type but add host/port context; chain
        # the original so its traceback is not lost.
        raise type(e)("%s %s:%d" % (e, host, port)) from e
    return sock
|
def function[replySocket, parameter[self, port, host]]:
constant[
Create a REP-style socket for servers
]
<ast.Try object at 0x7da18bc73c70>
return[name[socket]]
|
keyword[def] identifier[replySocket] ( identifier[self] , identifier[port] , identifier[host] = literal[string] ):
literal[string]
keyword[try] :
identifier[socket] = identifier[self] . identifier[_context] . identifier[socket] ( identifier[zmq] . identifier[REP] )
identifier[socket] . identifier[bind] ( identifier[self] . identifier[tcpAddress] ( identifier[host] , identifier[port] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[newMsg] = identifier[str] ( literal[string] %( identifier[str] ( identifier[e] ), identifier[host] , identifier[port] ))
keyword[raise] ( identifier[type] ( identifier[e] ))( identifier[newMsg] )
keyword[return] identifier[socket]
|
def replySocket(self, port, host='*'):
"""
Create a REP-style socket for servers
"""
try:
socket = self._context.socket(zmq.REP)
socket.bind(self.tcpAddress(host, port)) # depends on [control=['try'], data=[]]
except Exception as e:
newMsg = str('%s %s:%d' % (str(e), host, port))
raise type(e)(newMsg) # depends on [control=['except'], data=['e']]
return socket
|
def nonperiodic_lightcurve_features(times, mags, errs, magsarefluxes=False):
    '''This calculates the following nonperiodic features of the light curve,
    listed in Richards, et al. 2011):
    - amplitude
    - beyond1std
    - flux_percentile_ratio_mid20
    - flux_percentile_ratio_mid35
    - flux_percentile_ratio_mid50
    - flux_percentile_ratio_mid65
    - flux_percentile_ratio_mid80
    - linear_trend
    - max_slope
    - median_absolute_deviation
    - median_buffer_range_percentage
    - pair_slope_trend
    - percent_amplitude
    - percent_difference_flux_percentile
    - skew
    - stdev
    - timelength
    - mintime
    - maxtime
    Parameters
    ----------
    times,mags,errs : np.array
        The input mag/flux time-series to process.
    magsarefluxes : bool
        If True, will treat values in `mags` as fluxes instead of magnitudes.
    Returns
    -------
    dict
        A dict containing all of the features listed above, or None if there
        are fewer than 10 finite, nonzero-error detections.
    '''
    # remove nonfinite times/mags/errs first
    finiteind = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
    ftimes, fmags, ferrs = times[finiteind], mags[finiteind], errs[finiteind]
    # remove measurements with zero errors (they'd blow up the 1/err^2
    # weights in the linear fit below)
    nzind = npnonzero(ferrs)
    ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
    ndet = len(fmags)
    # need at least 10 usable detections for meaningful statistics
    if ndet <= 9:
        LOGERROR('not enough detections in this magseries '
                 'to calculate non-periodic features')
        return None
    # calculate the moments
    moments = lightcurve_moments(ftimes, fmags, ferrs)
    # calculate the flux measures
    fluxmeasures = lightcurve_flux_measures(ftimes, fmags, ferrs,
                                            magsarefluxes=magsarefluxes)
    # calculate the point-to-point measures
    ptpmeasures = lightcurve_ptp_measures(ftimes, fmags, ferrs)
    # get the length in time
    mintime, maxtime = npmin(ftimes), npmax(ftimes)
    # NOTE(review): timelength == 0 (all identical timestamps) would make
    # ndetobslength_ratio below divide by zero -- assumed not to happen
    # for real light curves; confirm upstream filtering.
    timelength = maxtime - mintime
    # get the amplitude
    series_amplitude = 0.5*(npmax(fmags) - npmin(fmags))
    # calculate the linear fit to the entire mag series, weighted by
    # 1/err^2. NOTE(review): fitcoeffs[1] is the slope only if nppolyfit
    # returns coefficients lowest-order first (the
    # numpy.polynomial.polynomial.polyfit convention) -- confirm the alias.
    fitcoeffs = nppolyfit(ftimes, fmags, 1, w=1.0/(ferrs*ferrs))
    series_linear_slope = fitcoeffs[1]
    # calculate the magnitude ratio (from the WISE paper)
    series_magratio = (
        (npmax(fmags) - moments['median']) / (npmax(fmags) - npmin(fmags))
    )
    # this is the dictionary returned containing all the measures
    # (the per-group measures from the helpers are merged in below)
    measures = {
        'ndet':fmags.size,
        'mintime':mintime,
        'maxtime':maxtime,
        'timelength':timelength,
        'amplitude':series_amplitude,
        'ndetobslength_ratio':ndet/timelength,
        'linear_fit_slope':series_linear_slope,
        'magnitude_ratio':series_magratio,
    }
    if moments:
        measures.update(moments)
    if ptpmeasures:
        measures.update(ptpmeasures)
    if fluxmeasures:
        measures.update(fluxmeasures)
    return measures
|
def function[nonperiodic_lightcurve_features, parameter[times, mags, errs, magsarefluxes]]:
constant[This calculates the following nonperiodic features of the light curve,
listed in Richards, et al. 2011):
- amplitude
- beyond1std
- flux_percentile_ratio_mid20
- flux_percentile_ratio_mid35
- flux_percentile_ratio_mid50
- flux_percentile_ratio_mid65
- flux_percentile_ratio_mid80
- linear_trend
- max_slope
- median_absolute_deviation
- median_buffer_range_percentage
- pair_slope_trend
- percent_amplitude
- percent_difference_flux_percentile
- skew
- stdev
- timelength
- mintime
- maxtime
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to process.
magsarefluxes : bool
If True, will treat values in `mags` as fluxes instead of magnitudes.
Returns
-------
dict
A dict containing all of the features listed above.
]
variable[finiteind] assign[=] binary_operation[binary_operation[call[name[npisfinite], parameter[name[times]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[npisfinite], parameter[name[mags]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[npisfinite], parameter[name[errs]]]]
<ast.Tuple object at 0x7da1b008c0a0> assign[=] tuple[[<ast.Subscript object at 0x7da1b008c3d0>, <ast.Subscript object at 0x7da1b008d1e0>, <ast.Subscript object at 0x7da1b008c850>]]
variable[nzind] assign[=] call[name[npnonzero], parameter[name[ferrs]]]
<ast.Tuple object at 0x7da1b008cc70> assign[=] tuple[[<ast.Subscript object at 0x7da1b00dbb50>, <ast.Subscript object at 0x7da1b00db850>, <ast.Subscript object at 0x7da1b00dab90>]]
variable[ndet] assign[=] call[name[len], parameter[name[fmags]]]
if compare[name[ndet] greater[>] constant[9]] begin[:]
variable[moments] assign[=] call[name[lightcurve_moments], parameter[name[ftimes], name[fmags], name[ferrs]]]
variable[fluxmeasures] assign[=] call[name[lightcurve_flux_measures], parameter[name[ftimes], name[fmags], name[ferrs]]]
variable[ptpmeasures] assign[=] call[name[lightcurve_ptp_measures], parameter[name[ftimes], name[fmags], name[ferrs]]]
<ast.Tuple object at 0x7da1b00dac80> assign[=] tuple[[<ast.Call object at 0x7da1b00d81c0>, <ast.Call object at 0x7da1b00d8d00>]]
variable[timelength] assign[=] binary_operation[name[maxtime] - name[mintime]]
variable[series_amplitude] assign[=] binary_operation[constant[0.5] * binary_operation[call[name[npmax], parameter[name[fmags]]] - call[name[npmin], parameter[name[fmags]]]]]
variable[fitcoeffs] assign[=] call[name[nppolyfit], parameter[name[ftimes], name[fmags], constant[1]]]
variable[series_linear_slope] assign[=] call[name[fitcoeffs]][constant[1]]
variable[rolled_fmags] assign[=] call[name[nproll], parameter[name[fmags], constant[1]]]
variable[series_magratio] assign[=] binary_operation[binary_operation[call[name[npmax], parameter[name[fmags]]] - call[name[moments]][constant[median]]] / binary_operation[call[name[npmax], parameter[name[fmags]]] - call[name[npmin], parameter[name[fmags]]]]]
variable[measures] assign[=] dictionary[[<ast.Constant object at 0x7da1b00da170>, <ast.Constant object at 0x7da1b00d9ff0>, <ast.Constant object at 0x7da1b00db4c0>, <ast.Constant object at 0x7da1b00dace0>, <ast.Constant object at 0x7da1b00d88e0>, <ast.Constant object at 0x7da1b00d95a0>, <ast.Constant object at 0x7da1b00daef0>, <ast.Constant object at 0x7da1b00d8c40>], [<ast.Attribute object at 0x7da1b00d8430>, <ast.Name object at 0x7da1b00db250>, <ast.Name object at 0x7da1b00db6d0>, <ast.Name object at 0x7da1b00d8df0>, <ast.Name object at 0x7da1b00d83d0>, <ast.BinOp object at 0x7da1b00db7c0>, <ast.Name object at 0x7da1b00d9660>, <ast.Name object at 0x7da1b00db220>]]
if name[moments] begin[:]
call[name[measures].update, parameter[name[moments]]]
if name[ptpmeasures] begin[:]
call[name[measures].update, parameter[name[ptpmeasures]]]
if name[fluxmeasures] begin[:]
call[name[measures].update, parameter[name[fluxmeasures]]]
return[name[measures]]
|
keyword[def] identifier[nonperiodic_lightcurve_features] ( identifier[times] , identifier[mags] , identifier[errs] , identifier[magsarefluxes] = keyword[False] ):
literal[string]
identifier[finiteind] = identifier[npisfinite] ( identifier[times] )& identifier[npisfinite] ( identifier[mags] )& identifier[npisfinite] ( identifier[errs] )
identifier[ftimes] , identifier[fmags] , identifier[ferrs] = identifier[times] [ identifier[finiteind] ], identifier[mags] [ identifier[finiteind] ], identifier[errs] [ identifier[finiteind] ]
identifier[nzind] = identifier[npnonzero] ( identifier[ferrs] )
identifier[ftimes] , identifier[fmags] , identifier[ferrs] = identifier[ftimes] [ identifier[nzind] ], identifier[fmags] [ identifier[nzind] ], identifier[ferrs] [ identifier[nzind] ]
identifier[ndet] = identifier[len] ( identifier[fmags] )
keyword[if] identifier[ndet] > literal[int] :
identifier[moments] = identifier[lightcurve_moments] ( identifier[ftimes] , identifier[fmags] , identifier[ferrs] )
identifier[fluxmeasures] = identifier[lightcurve_flux_measures] ( identifier[ftimes] , identifier[fmags] , identifier[ferrs] ,
identifier[magsarefluxes] = identifier[magsarefluxes] )
identifier[ptpmeasures] = identifier[lightcurve_ptp_measures] ( identifier[ftimes] , identifier[fmags] , identifier[ferrs] )
identifier[mintime] , identifier[maxtime] = identifier[npmin] ( identifier[ftimes] ), identifier[npmax] ( identifier[ftimes] )
identifier[timelength] = identifier[maxtime] - identifier[mintime]
identifier[series_amplitude] = literal[int] *( identifier[npmax] ( identifier[fmags] )- identifier[npmin] ( identifier[fmags] ))
identifier[fitcoeffs] = identifier[nppolyfit] ( identifier[ftimes] , identifier[fmags] , literal[int] , identifier[w] = literal[int] /( identifier[ferrs] * identifier[ferrs] ))
identifier[series_linear_slope] = identifier[fitcoeffs] [ literal[int] ]
identifier[rolled_fmags] = identifier[nproll] ( identifier[fmags] , literal[int] )
identifier[series_magratio] =(
( identifier[npmax] ( identifier[fmags] )- identifier[moments] [ literal[string] ])/( identifier[npmax] ( identifier[fmags] )- identifier[npmin] ( identifier[fmags] ))
)
identifier[measures] ={
literal[string] : identifier[fmags] . identifier[size] ,
literal[string] : identifier[mintime] ,
literal[string] : identifier[maxtime] ,
literal[string] : identifier[timelength] ,
literal[string] : identifier[series_amplitude] ,
literal[string] : identifier[ndet] / identifier[timelength] ,
literal[string] : identifier[series_linear_slope] ,
literal[string] : identifier[series_magratio] ,
}
keyword[if] identifier[moments] :
identifier[measures] . identifier[update] ( identifier[moments] )
keyword[if] identifier[ptpmeasures] :
identifier[measures] . identifier[update] ( identifier[ptpmeasures] )
keyword[if] identifier[fluxmeasures] :
identifier[measures] . identifier[update] ( identifier[fluxmeasures] )
keyword[return] identifier[measures]
keyword[else] :
identifier[LOGERROR] ( literal[string]
literal[string] )
keyword[return] keyword[None]
|
def nonperiodic_lightcurve_features(times, mags, errs, magsarefluxes=False):
"""This calculates the following nonperiodic features of the light curve,
listed in Richards, et al. 2011):
- amplitude
- beyond1std
- flux_percentile_ratio_mid20
- flux_percentile_ratio_mid35
- flux_percentile_ratio_mid50
- flux_percentile_ratio_mid65
- flux_percentile_ratio_mid80
- linear_trend
- max_slope
- median_absolute_deviation
- median_buffer_range_percentage
- pair_slope_trend
- percent_amplitude
- percent_difference_flux_percentile
- skew
- stdev
- timelength
- mintime
- maxtime
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to process.
magsarefluxes : bool
If True, will treat values in `mags` as fluxes instead of magnitudes.
Returns
-------
dict
A dict containing all of the features listed above.
"""
# remove nans first
finiteind = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
(ftimes, fmags, ferrs) = (times[finiteind], mags[finiteind], errs[finiteind])
# remove zero errors
nzind = npnonzero(ferrs)
(ftimes, fmags, ferrs) = (ftimes[nzind], fmags[nzind], ferrs[nzind])
ndet = len(fmags)
if ndet > 9:
# calculate the moments
moments = lightcurve_moments(ftimes, fmags, ferrs)
# calculate the flux measures
fluxmeasures = lightcurve_flux_measures(ftimes, fmags, ferrs, magsarefluxes=magsarefluxes)
# calculate the point-to-point measures
ptpmeasures = lightcurve_ptp_measures(ftimes, fmags, ferrs)
# get the length in time
(mintime, maxtime) = (npmin(ftimes), npmax(ftimes))
timelength = maxtime - mintime
# get the amplitude
series_amplitude = 0.5 * (npmax(fmags) - npmin(fmags))
# calculate the linear fit to the entire mag series
fitcoeffs = nppolyfit(ftimes, fmags, 1, w=1.0 / (ferrs * ferrs))
series_linear_slope = fitcoeffs[1]
# roll fmags by 1
rolled_fmags = nproll(fmags, 1)
# calculate the magnitude ratio (from the WISE paper)
series_magratio = (npmax(fmags) - moments['median']) / (npmax(fmags) - npmin(fmags))
# this is the dictionary returned containing all the measures
measures = {'ndet': fmags.size, 'mintime': mintime, 'maxtime': maxtime, 'timelength': timelength, 'amplitude': series_amplitude, 'ndetobslength_ratio': ndet / timelength, 'linear_fit_slope': series_linear_slope, 'magnitude_ratio': series_magratio}
if moments:
measures.update(moments) # depends on [control=['if'], data=[]]
if ptpmeasures:
measures.update(ptpmeasures) # depends on [control=['if'], data=[]]
if fluxmeasures:
measures.update(fluxmeasures) # depends on [control=['if'], data=[]]
return measures # depends on [control=['if'], data=['ndet']]
else:
LOGERROR('not enough detections in this magseries to calculate non-periodic features')
return None
|
def _modify_event(self, event_name, method, func):
"""
Wrapper to call a list's method from one of the events
"""
if event_name not in self.ALL_EVENTS:
raise TypeError(('event_name ("%s") can only be one of the '
'following: %s') % (event_name,
repr(self.ALL_EVENTS)))
if not isinstance(func, collections.Callable):
raise TypeError(('func must be callable to be added as an '
'observer.'))
getattr(self._events[event_name], method)(func)
|
def function[_modify_event, parameter[self, event_name, method, func]]:
constant[
Wrapper to call a list's method from one of the events
]
if compare[name[event_name] <ast.NotIn object at 0x7da2590d7190> name[self].ALL_EVENTS] begin[:]
<ast.Raise object at 0x7da1b2850a90>
if <ast.UnaryOp object at 0x7da1b28506a0> begin[:]
<ast.Raise object at 0x7da1b2853220>
call[call[name[getattr], parameter[call[name[self]._events][name[event_name]], name[method]]], parameter[name[func]]]
|
keyword[def] identifier[_modify_event] ( identifier[self] , identifier[event_name] , identifier[method] , identifier[func] ):
literal[string]
keyword[if] identifier[event_name] keyword[not] keyword[in] identifier[self] . identifier[ALL_EVENTS] :
keyword[raise] identifier[TypeError] (( literal[string]
literal[string] )%( identifier[event_name] ,
identifier[repr] ( identifier[self] . identifier[ALL_EVENTS] )))
keyword[if] keyword[not] identifier[isinstance] ( identifier[func] , identifier[collections] . identifier[Callable] ):
keyword[raise] identifier[TypeError] (( literal[string]
literal[string] ))
identifier[getattr] ( identifier[self] . identifier[_events] [ identifier[event_name] ], identifier[method] )( identifier[func] )
|
def _modify_event(self, event_name, method, func):
"""
Wrapper to call a list's method from one of the events
"""
if event_name not in self.ALL_EVENTS:
raise TypeError('event_name ("%s") can only be one of the following: %s' % (event_name, repr(self.ALL_EVENTS))) # depends on [control=['if'], data=['event_name']]
if not isinstance(func, collections.Callable):
raise TypeError('func must be callable to be added as an observer.') # depends on [control=['if'], data=[]]
getattr(self._events[event_name], method)(func)
|
def visit_Import(self, node):
    """callback for 'import' statement

    Records each imported name as a (None, name, asname, None) tuple on
    ``self.imports``, then continues the generic AST walk.
    """
    for alias in node.names:
        self.imports.append((None, alias.name, alias.asname, None))
    ast.NodeVisitor.generic_visit(self, node)
|
def function[visit_Import, parameter[self, node]]:
constant[callback for 'import' statement]
call[name[self].imports.extend, parameter[<ast.GeneratorExp object at 0x7da18ede5a20>]]
call[name[ast].NodeVisitor.generic_visit, parameter[name[self], name[node]]]
|
keyword[def] identifier[visit_Import] ( identifier[self] , identifier[node] ):
literal[string]
identifier[self] . identifier[imports] . identifier[extend] (( keyword[None] , identifier[n] . identifier[name] , identifier[n] . identifier[asname] , keyword[None] )
keyword[for] identifier[n] keyword[in] identifier[node] . identifier[names] )
identifier[ast] . identifier[NodeVisitor] . identifier[generic_visit] ( identifier[self] , identifier[node] )
|
def visit_Import(self, node):
"""callback for 'import' statement"""
self.imports.extend(((None, n.name, n.asname, None) for n in node.names))
ast.NodeVisitor.generic_visit(self, node)
|
async def _upload_chunks(
        cls, rfile: BootResourceFile, content: io.IOBase, chunk_size: int,
        progress_callback=None):
    """Upload the `content` to `rfile` in chunks using `chunk_size`.

    Streams `content` from its beginning to the boot-resource upload
    URI, one `chunk_size`-sized PUT at a time.  After each chunk,
    `progress_callback` (if given) is called with the fraction
    uploaded_bytes / rfile.size.
    """
    # Always start from the beginning of the stream, regardless of where
    # the caller left the file position.
    content.seek(0, io.SEEK_SET)
    # Build the absolute upload URI from the handler's base URI and the
    # relative path the API returned for this file.
    upload_uri = urlparse(
        cls._handler.uri)._replace(path=rfile._data['upload_uri']).geturl()
    uploaded_size = 0
    insecure = cls._handler.session.insecure
    # NOTE(review): `verify_ssl` is deprecated in newer aiohttp releases
    # in favour of the `ssl=` argument -- confirm against the pinned
    # aiohttp version before upgrading.
    connector = aiohttp.TCPConnector(verify_ssl=(not insecure))
    session = aiohttp.ClientSession(connector=connector)
    async with session:
        while True:
            buf = content.read(chunk_size)
            length = len(buf)
            if length > 0:
                uploaded_size += length
                await cls._put_chunk(session, upload_uri, buf)
                if progress_callback is not None:
                    progress_callback(uploaded_size / rfile.size)
            # A short (or empty) read means the stream is exhausted; a
            # file whose size is an exact multiple of chunk_size ends on
            # the following zero-length read.
            if length != chunk_size:
                break
|
<ast.AsyncFunctionDef object at 0x7da1b26af970>
|
keyword[async] keyword[def] identifier[_upload_chunks] (
identifier[cls] , identifier[rfile] : identifier[BootResourceFile] , identifier[content] : identifier[io] . identifier[IOBase] , identifier[chunk_size] : identifier[int] ,
identifier[progress_callback] = keyword[None] ):
literal[string]
identifier[content] . identifier[seek] ( literal[int] , identifier[io] . identifier[SEEK_SET] )
identifier[upload_uri] = identifier[urlparse] (
identifier[cls] . identifier[_handler] . identifier[uri] ). identifier[_replace] ( identifier[path] = identifier[rfile] . identifier[_data] [ literal[string] ]). identifier[geturl] ()
identifier[uploaded_size] = literal[int]
identifier[insecure] = identifier[cls] . identifier[_handler] . identifier[session] . identifier[insecure]
identifier[connector] = identifier[aiohttp] . identifier[TCPConnector] ( identifier[verify_ssl] =( keyword[not] identifier[insecure] ))
identifier[session] = identifier[aiohttp] . identifier[ClientSession] ( identifier[connector] = identifier[connector] )
keyword[async] keyword[with] identifier[session] :
keyword[while] keyword[True] :
identifier[buf] = identifier[content] . identifier[read] ( identifier[chunk_size] )
identifier[length] = identifier[len] ( identifier[buf] )
keyword[if] identifier[length] > literal[int] :
identifier[uploaded_size] += identifier[length]
keyword[await] identifier[cls] . identifier[_put_chunk] ( identifier[session] , identifier[upload_uri] , identifier[buf] )
keyword[if] identifier[progress_callback] keyword[is] keyword[not] keyword[None] :
identifier[progress_callback] ( identifier[uploaded_size] / identifier[rfile] . identifier[size] )
keyword[if] identifier[length] != identifier[chunk_size] :
keyword[break]
|
async def _upload_chunks(cls, rfile: BootResourceFile, content: io.IOBase, chunk_size: int, progress_callback=None):
"""Upload the `content` to `rfile` in chunks using `chunk_size`."""
content.seek(0, io.SEEK_SET)
upload_uri = urlparse(cls._handler.uri)._replace(path=rfile._data['upload_uri']).geturl()
uploaded_size = 0
insecure = cls._handler.session.insecure
connector = aiohttp.TCPConnector(verify_ssl=not insecure)
session = aiohttp.ClientSession(connector=connector)
async with session:
while True:
buf = content.read(chunk_size)
length = len(buf)
if length > 0:
uploaded_size += length
await cls._put_chunk(session, upload_uri, buf)
if progress_callback is not None:
progress_callback(uploaded_size / rfile.size) # depends on [control=['if'], data=['progress_callback']] # depends on [control=['if'], data=['length']]
if length != chunk_size:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
|
def interact(self):
    """This should call display(Javascript(jscode))."""
    rendered = self.render()
    js = Javascript(data=rendered, lib=self.jslibs)
    display(js)
|
def function[interact, parameter[self]]:
constant[This should call display(Javascript(jscode)).]
variable[jscode] assign[=] call[name[self].render, parameter[]]
call[name[display], parameter[call[name[Javascript], parameter[]]]]
|
keyword[def] identifier[interact] ( identifier[self] ):
literal[string]
identifier[jscode] = identifier[self] . identifier[render] ()
identifier[display] ( identifier[Javascript] ( identifier[data] = identifier[jscode] , identifier[lib] = identifier[self] . identifier[jslibs] ))
|
def interact(self):
"""This should call display(Javascript(jscode))."""
jscode = self.render()
display(Javascript(data=jscode, lib=self.jslibs))
|
def has_perm(self, user_obj, perm, obj=None):
    """
    Check if user have permission (of object)

    If the user_obj is not authenticated, it return ``False``.
    If no object is specified, it return ``True`` when the corresponding
    permission was specified to ``True`` (changed from v0.7.0).
    This behavior is based on the django system.
    https://code.djangoproject.com/wiki/RowLevelPermissions

    If an object is specified, it will return ``True`` if the user is
    found in ``field_name`` of the object (e.g. ``obj.collaborators``).
    So once the object store the user as a collaborator in
    ``field_name`` attribute (default: ``collaborators``), the collaborator
    can change or delete the object (you can change this behavior to set
    ``any_permission``, ``change_permission`` or ``delete_permission``
    attributes of this instance).

    Parameters
    ----------
    user_obj : django user model instance
        A django user model instance which be checked
    perm : string
        `app_label.codename` formatted permission string
    obj : None or django model instance
        None or django model instance for object permission

    Returns
    -------
    boolean
        Whether the specified user have specified permission (of specified
        object).
    """
    if not is_authenticated(user_obj):
        return False
    # construct the permission full names once, up front
    change_permission = self.get_full_permission_string('change')
    delete_permission = self.get_full_permission_string('delete')

    def granted():
        # True when this backend's flags allow `perm` at all.
        if self.any_permission:
            return True
        if self.change_permission and perm == change_permission:
            return True
        return bool(self.delete_permission and perm == delete_permission)

    if obj is None:
        # Object permission without obj should return True when the
        # corresponding flag is set.
        # Ref: https://code.djangoproject.com/wiki/RowLevelPermissions
        return granted()
    if not user_obj.is_active:
        return False
    # get collaborator queryset (resolve managers/querysets via .all())
    collaborators = field_lookup(obj, self.field_name)
    if hasattr(collaborators, 'all'):
        collaborators = collaborators.all()
    if user_obj in collaborators and granted():
        return True
    return False
|
def function[has_perm, parameter[self, user_obj, perm, obj]]:
constant[
Check if user have permission (of object)
If the user_obj is not authenticated, it return ``False``.
If no object is specified, it return ``True`` when the corresponding
permission was specified to ``True`` (changed from v0.7.0).
This behavior is based on the django system.
https://code.djangoproject.com/wiki/RowLevelPermissions
If an object is specified, it will return ``True`` if the user is
found in ``field_name`` of the object (e.g. ``obj.collaborators``).
So once the object store the user as a collaborator in
``field_name`` attribute (default: ``collaborators``), the collaborator
can change or delete the object (you can change this behavior to set
``any_permission``, ``change_permission`` or ``delete_permission``
attributes of this instance).
Parameters
----------
user_obj : django user model instance
A django user model instance which be checked
perm : string
`app_label.codename` formatted permission string
obj : None or django model instance
None or django model instance for object permission
Returns
-------
boolean
Whether the specified user have specified permission (of specified
object).
]
if <ast.UnaryOp object at 0x7da1b06fc100> begin[:]
return[constant[False]]
variable[change_permission] assign[=] call[name[self].get_full_permission_string, parameter[constant[change]]]
variable[delete_permission] assign[=] call[name[self].get_full_permission_string, parameter[constant[delete]]]
if compare[name[obj] is constant[None]] begin[:]
if name[self].any_permission begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b06fc0a0> begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b06fca00> begin[:]
return[constant[True]]
return[constant[False]]
return[constant[False]]
|
keyword[def] identifier[has_perm] ( identifier[self] , identifier[user_obj] , identifier[perm] , identifier[obj] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[is_authenticated] ( identifier[user_obj] ):
keyword[return] keyword[False]
identifier[change_permission] = identifier[self] . identifier[get_full_permission_string] ( literal[string] )
identifier[delete_permission] = identifier[self] . identifier[get_full_permission_string] ( literal[string] )
keyword[if] identifier[obj] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[any_permission] :
keyword[return] keyword[True]
keyword[if] identifier[self] . identifier[change_permission] keyword[and] identifier[perm] == identifier[change_permission] :
keyword[return] keyword[True]
keyword[if] identifier[self] . identifier[delete_permission] keyword[and] identifier[perm] == identifier[delete_permission] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[elif] identifier[user_obj] . identifier[is_active] :
identifier[collaborators] = identifier[field_lookup] ( identifier[obj] , identifier[self] . identifier[field_name] )
keyword[if] identifier[hasattr] ( identifier[collaborators] , literal[string] ):
identifier[collaborators] = identifier[collaborators] . identifier[all] ()
keyword[if] identifier[user_obj] keyword[in] identifier[collaborators] :
keyword[if] identifier[self] . identifier[any_permission] :
keyword[return] keyword[True]
keyword[if] ( identifier[self] . identifier[change_permission] keyword[and]
identifier[perm] == identifier[change_permission] ):
keyword[return] keyword[True]
keyword[if] ( identifier[self] . identifier[delete_permission] keyword[and]
identifier[perm] == identifier[delete_permission] ):
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def has_perm(self, user_obj, perm, obj=None):
    """
    Check if user have permission (of object)
    If the user_obj is not authenticated, it return ``False``.
    If no object is specified, it return ``True`` when the corresponding
    permission was specified to ``True`` (changed from v0.7.0).
    This behavior is based on the django system.
    https://code.djangoproject.com/wiki/RowLevelPermissions
    If an object is specified, it will return ``True`` if the user is
    found in ``field_name`` of the object (e.g. ``obj.collaborators``).
    So once the object store the user as a collaborator in
    ``field_name`` attribute (default: ``collaborators``), the collaborator
    can change or delete the object (you can change this behavior to set
    ``any_permission``, ``change_permission`` or ``delete_permission``
    attributes of this instance).

    Parameters
    ----------
    user_obj : django user model instance
        A django user model instance which be checked
    perm : string
        `app_label.codename` formatted permission string
    obj : None or django model instance
        None or django model instance for object permission

    Returns
    -------
    boolean
        Whether the specified user have specified permission (of specified
        object).
    """
    if not is_authenticated(user_obj):
        return False
    # Build the fully-qualified permission names once up front.
    change_perm = self.get_full_permission_string('change')
    delete_perm = self.get_full_permission_string('delete')
    if obj is None:
        # Non-object permission query: answered purely from the handler's
        # configuration flags (see RowLevelPermissions wiki page).
        return bool(
            self.any_permission
            or (self.change_permission and perm == change_perm)
            or (self.delete_permission and perm == delete_perm)
        )
    if user_obj.is_active:
        # Resolve the collaborator collection; it may be a related manager,
        # in which case ``.all()`` yields the actual queryset.
        members = field_lookup(obj, self.field_name)
        if hasattr(members, 'all'):
            members = members.all()
        if user_obj in members:
            if self.any_permission:
                # Collaborators hold every kind of permission on the obj.
                return True
            if self.change_permission and perm == change_perm:
                return True
            if self.delete_permission and perm == delete_perm:
                return True
    return False
|
def data(self) -> Dict[str, Any]:
    """Return travel data as a plain, JSON-serializable dict.

    The result contains the station metadata plus at most
    ``self.max_journeys`` journeys, ordered by minutes until departure.
    """
    # Keep only the soonest departures, sorted by minutes-to-departure.
    upcoming = sorted(self.journeys, key=lambda j: j.real_departure)[: self.max_journeys]
    journeys = [
        {
            "product": j.product,
            "number": j.number,
            "trainId": j.train_id,
            "direction": j.direction,
            "departure_time": j.real_departure_time,
            "minutes": j.real_departure,
            "delay": j.delay,
            "stops": [stop["station"] for stop in j.stops],
            "info": j.info,
            "info_long": j.info_long,
            "icon": j.icon,
        }
        for j in upcoming
    ]
    return {
        "station": self.station,
        "stationId": self.station_id,
        "filter": self.products_filter,
        "journeys": journeys,
    }
|
def function[data, parameter[self]]:
constant[Return travel data.]
<ast.AnnAssign object at 0x7da1b1130df0>
call[name[data]][constant[station]] assign[=] name[self].station
call[name[data]][constant[stationId]] assign[=] name[self].station_id
call[name[data]][constant[filter]] assign[=] name[self].products_filter
variable[journeys] assign[=] list[[]]
for taget[name[j]] in starred[call[call[name[sorted], parameter[name[self].journeys]]][<ast.Slice object at 0x7da1b1130460>]] begin[:]
call[name[journeys].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1130760>, <ast.Constant object at 0x7da1b1132620>, <ast.Constant object at 0x7da1b1131390>, <ast.Constant object at 0x7da1b1131a20>, <ast.Constant object at 0x7da1b1132380>, <ast.Constant object at 0x7da1b1132200>, <ast.Constant object at 0x7da1b1130550>, <ast.Constant object at 0x7da1b1131030>, <ast.Constant object at 0x7da1b1131d80>, <ast.Constant object at 0x7da1b1131b10>, <ast.Constant object at 0x7da1b1130f40>], [<ast.Attribute object at 0x7da1b1130310>, <ast.Attribute object at 0x7da1b11325c0>, <ast.Attribute object at 0x7da1b1130b20>, <ast.Attribute object at 0x7da1b1131cf0>, <ast.Attribute object at 0x7da1b1132710>, <ast.Attribute object at 0x7da1b11302e0>, <ast.Attribute object at 0x7da1b1131f90>, <ast.ListComp object at 0x7da1b1131840>, <ast.Attribute object at 0x7da1b1131960>, <ast.Attribute object at 0x7da1b11307c0>, <ast.Attribute object at 0x7da1b11301c0>]]]]
call[name[data]][constant[journeys]] assign[=] name[journeys]
return[name[data]]
|
keyword[def] identifier[data] ( identifier[self] )-> identifier[Dict] [ identifier[str] , identifier[Any] ]:
literal[string]
identifier[data] : identifier[Dict] [ identifier[str] , identifier[Any] ]={}
identifier[data] [ literal[string] ]= identifier[self] . identifier[station]
identifier[data] [ literal[string] ]= identifier[self] . identifier[station_id]
identifier[data] [ literal[string] ]= identifier[self] . identifier[products_filter]
identifier[journeys] =[]
keyword[for] identifier[j] keyword[in] identifier[sorted] ( identifier[self] . identifier[journeys] , identifier[key] = keyword[lambda] identifier[k] : identifier[k] . identifier[real_departure] )[
: identifier[self] . identifier[max_journeys]
]:
identifier[journeys] . identifier[append] (
{
literal[string] : identifier[j] . identifier[product] ,
literal[string] : identifier[j] . identifier[number] ,
literal[string] : identifier[j] . identifier[train_id] ,
literal[string] : identifier[j] . identifier[direction] ,
literal[string] : identifier[j] . identifier[real_departure_time] ,
literal[string] : identifier[j] . identifier[real_departure] ,
literal[string] : identifier[j] . identifier[delay] ,
literal[string] :[ identifier[s] [ literal[string] ] keyword[for] identifier[s] keyword[in] identifier[j] . identifier[stops] ],
literal[string] : identifier[j] . identifier[info] ,
literal[string] : identifier[j] . identifier[info_long] ,
literal[string] : identifier[j] . identifier[icon] ,
}
)
identifier[data] [ literal[string] ]= identifier[journeys]
keyword[return] identifier[data]
|
def data(self) -> Dict[str, Any]:
"""Return travel data."""
data: Dict[str, Any] = {}
data['station'] = self.station
data['stationId'] = self.station_id
data['filter'] = self.products_filter
journeys = []
for j in sorted(self.journeys, key=lambda k: k.real_departure)[:self.max_journeys]:
journeys.append({'product': j.product, 'number': j.number, 'trainId': j.train_id, 'direction': j.direction, 'departure_time': j.real_departure_time, 'minutes': j.real_departure, 'delay': j.delay, 'stops': [s['station'] for s in j.stops], 'info': j.info, 'info_long': j.info_long, 'icon': j.icon}) # depends on [control=['for'], data=['j']]
data['journeys'] = journeys
return data
|
def validate(self):
    """Validate the contents of the configuration file.

    Returns ``None`` on success. Raises ``TypeError`` when the parsed
    configuration is not a dict, or re-raises ``ConfigValidationException``
    (after logging it) when any part of the tree fails schema validation.
    """
    if not isinstance(self._data, dict):
        raise TypeError('freight forwarder configuration file must be a dict.')

    # Temporarily adjust log verbosity for the duration of validation,
    # restoring the previous level on success.
    previous_level = logger.get_level()
    logger.set_level('DEBUG' if self._verbose else 'ERROR')

    logger.info('Starting configuration validation', extra={"formatter": 'config-start'})

    # Walk a copy so the original config data stays untouched.
    snapshot = self._data.copy()
    try:
        self._walk_tree(snapshot, ROOT_SCHEME)
    except ConfigValidationException as error:
        error.log_error()
        raise

    logger.info("Config validation passed.", extra={'formatter': 'config-success'})
    logger.set_level(previous_level)
|
def function[validate, parameter[self]]:
constant[ Validate the contents of the configuration file. Will return None if validation is successful or
raise an error if not.
]
if <ast.UnaryOp object at 0x7da1b228cdf0> begin[:]
<ast.Raise object at 0x7da1b228e080>
variable[current_log_level] assign[=] call[name[logger].get_level, parameter[]]
if name[self]._verbose begin[:]
call[name[logger].set_level, parameter[constant[DEBUG]]]
call[name[logger].info, parameter[constant[Starting configuration validation]]]
variable[config_data] assign[=] call[name[self]._data.copy, parameter[]]
<ast.Try object at 0x7da1b228e470>
call[name[logger].info, parameter[constant[Config validation passed.]]]
call[name[logger].set_level, parameter[name[current_log_level]]]
|
keyword[def] identifier[validate] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[_data] , identifier[dict] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[current_log_level] = identifier[logger] . identifier[get_level] ()
keyword[if] identifier[self] . identifier[_verbose] :
identifier[logger] . identifier[set_level] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[set_level] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] , identifier[extra] ={ literal[string] : literal[string] })
identifier[config_data] = identifier[self] . identifier[_data] . identifier[copy] ()
keyword[try] :
identifier[self] . identifier[_walk_tree] ( identifier[config_data] , identifier[ROOT_SCHEME] )
keyword[except] identifier[ConfigValidationException] keyword[as] identifier[e] :
identifier[e] . identifier[log_error] ()
keyword[raise]
identifier[logger] . identifier[info] ( literal[string] , identifier[extra] ={ literal[string] : literal[string] })
identifier[logger] . identifier[set_level] ( identifier[current_log_level] )
|
def validate(self):
""" Validate the contents of the configuration file. Will return None if validation is successful or
raise an error if not.
"""
if not isinstance(self._data, dict):
raise TypeError('freight forwarder configuration file must be a dict.') # depends on [control=['if'], data=[]]
current_log_level = logger.get_level()
if self._verbose:
logger.set_level('DEBUG') # depends on [control=['if'], data=[]]
else:
logger.set_level('ERROR')
logger.info('Starting configuration validation', extra={'formatter': 'config-start'})
# copy config dict to allow config data to stay in its original state.
config_data = self._data.copy()
try:
self._walk_tree(config_data, ROOT_SCHEME) # depends on [control=['try'], data=[]]
except ConfigValidationException as e:
e.log_error()
raise # depends on [control=['except'], data=['e']]
logger.info('Config validation passed.', extra={'formatter': 'config-success'})
logger.set_level(current_log_level)
|
def fetch_data(self):
    """Get the latest data from HydroQuebec.

    Coroutine (``yield from`` style): logs in, enumerates the contracts
    on the account, and for each contract gathers the balance plus the
    hourly, daily, monthly and annual consumption figures, storing the
    combined result in ``self._data[contract]``.
    """
    # Get http session
    yield from self._get_httpsession()
    # Get login page
    login_url = yield from self._get_login_page()
    # Post login page
    yield from self._post_login_page(login_url)
    # Get p_p_id and contracts
    p_p_id, contracts = yield from self._get_p_p_id_and_contract()
    # An empty mapping means the account has exactly one contract, which
    # is exposed on a different page; fetch it separately.
    if contracts == {}:
        contracts = yield from self._get_lonely_contract()
    # Get balance
    balances = yield from self._get_balances()
    balances_len = len(balances)
    balance_id = 0
    # For all contracts
    for contract, contract_url in contracts.items():
        if contract_url:
            yield from self._load_contract_page(contract_url)
        # Get Hourly data for yesterday (site publishes hourly data with
        # a one-day delay).
        try:
            yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
            day_date = yesterday.strftime("%Y-%m-%d")
            hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
            hourly_data = hourly_data['processed_hourly_data']
        except Exception:  # pylint: disable=W0703
            # Best-effort: hourly data is optional, fall back to empty.
            hourly_data = {}
        # Get Annual data
        try:
            annual_data = yield from self._get_annual_data(p_p_id)
        except PyHydroQuebecAnnualError:
            # We don't have annual data, which is possible if the
            # contract is younger than 1 year.
            annual_data = {}
        # Get Monthly data; only the first (current) period is used.
        monthly_data = yield from self._get_monthly_data(p_p_id)
        monthly_data = monthly_data[0]
        # Get daily data for the current monthly billing period.
        start_date = monthly_data.get('dateDebutPeriode')
        end_date = monthly_data.get('dateFinPeriode')
        try:
            daily_data = yield from self._get_daily_data(p_p_id, start_date, end_date)
        except Exception:  # pylint: disable=W0703
            daily_data = []
        # daily_data can be empty at the end/start of a billing period.
        if daily_data:
            daily_data = daily_data[0]['courant']
        # Assemble the per-contract result by remapping the raw keys
        # through the MONTHLY_MAP / ANNUAL_MAP / DAILY_MAP key tables.
        contract_data = {"balance": balances[balance_id]}
        for key1, key2 in MONTHLY_MAP:
            contract_data[key1] = monthly_data[key2]
        for key1, key2 in ANNUAL_MAP:
            contract_data[key1] = annual_data.get(key2, "")
        # We have to test daily_data because it's empty
        # at the end/start of a period.
        if daily_data:
            for key1, key2 in DAILY_MAP:
                contract_data[key1] = daily_data[key2]
        # Hourly
        if hourly_data:
            contract_data['yesterday_hourly_consumption'] = hourly_data
        # Add contract
        self._data[contract] = contract_data
        # NOTE(review): pairs contract N with balance N, clamping at the
        # last balance when there are fewer balances than contracts —
        # presumably balances are ordered like contracts; verify against
        # _get_balances.
        balance_count = balance_id + 1
        if balance_count < balances_len:
            balance_id += 1
|
def function[fetch_data, parameter[self]]:
constant[Get the latest data from HydroQuebec.]
<ast.YieldFrom object at 0x7da1b0f519f0>
variable[login_url] assign[=] <ast.YieldFrom object at 0x7da1b0f51e40>
<ast.YieldFrom object at 0x7da1b0f50d60>
<ast.Tuple object at 0x7da1b0f51c00> assign[=] <ast.YieldFrom object at 0x7da1b0f50340>
if compare[name[contracts] equal[==] dictionary[[], []]] begin[:]
variable[contracts] assign[=] <ast.YieldFrom object at 0x7da1b0f50310>
variable[balances] assign[=] <ast.YieldFrom object at 0x7da1b0f535e0>
variable[balances_len] assign[=] call[name[len], parameter[name[balances]]]
variable[balance_id] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b0f50cd0>, <ast.Name object at 0x7da1b0f50400>]]] in starred[call[name[contracts].items, parameter[]]] begin[:]
if name[contract_url] begin[:]
<ast.YieldFrom object at 0x7da1b0f51a80>
<ast.Try object at 0x7da1b0f52ef0>
<ast.Try object at 0x7da1b0f53b80>
variable[monthly_data] assign[=] <ast.YieldFrom object at 0x7da1b0f50790>
variable[monthly_data] assign[=] call[name[monthly_data]][constant[0]]
variable[start_date] assign[=] call[name[monthly_data].get, parameter[constant[dateDebutPeriode]]]
variable[end_date] assign[=] call[name[monthly_data].get, parameter[constant[dateFinPeriode]]]
<ast.Try object at 0x7da1b0f50a00>
if name[daily_data] begin[:]
variable[daily_data] assign[=] call[call[name[daily_data]][constant[0]]][constant[courant]]
variable[contract_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f50b20>], [<ast.Subscript object at 0x7da1b0f518d0>]]
for taget[tuple[[<ast.Name object at 0x7da1b0f53280>, <ast.Name object at 0x7da1b0f50e80>]]] in starred[name[MONTHLY_MAP]] begin[:]
call[name[contract_data]][name[key1]] assign[=] call[name[monthly_data]][name[key2]]
for taget[tuple[[<ast.Name object at 0x7da1b0f517e0>, <ast.Name object at 0x7da1b0f53850>]]] in starred[name[ANNUAL_MAP]] begin[:]
call[name[contract_data]][name[key1]] assign[=] call[name[annual_data].get, parameter[name[key2], constant[]]]
if name[daily_data] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0e62ef0>, <ast.Name object at 0x7da1b0e639a0>]]] in starred[name[DAILY_MAP]] begin[:]
call[name[contract_data]][name[key1]] assign[=] call[name[daily_data]][name[key2]]
if name[hourly_data] begin[:]
call[name[contract_data]][constant[yesterday_hourly_consumption]] assign[=] name[hourly_data]
call[name[self]._data][name[contract]] assign[=] name[contract_data]
variable[balance_count] assign[=] binary_operation[name[balance_id] + constant[1]]
if compare[name[balance_count] less[<] name[balances_len]] begin[:]
<ast.AugAssign object at 0x7da1b0e60070>
|
keyword[def] identifier[fetch_data] ( identifier[self] ):
literal[string]
keyword[yield] keyword[from] identifier[self] . identifier[_get_httpsession] ()
identifier[login_url] = keyword[yield] keyword[from] identifier[self] . identifier[_get_login_page] ()
keyword[yield] keyword[from] identifier[self] . identifier[_post_login_page] ( identifier[login_url] )
identifier[p_p_id] , identifier[contracts] = keyword[yield] keyword[from] identifier[self] . identifier[_get_p_p_id_and_contract] ()
keyword[if] identifier[contracts] =={}:
identifier[contracts] = keyword[yield] keyword[from] identifier[self] . identifier[_get_lonely_contract] ()
identifier[balances] = keyword[yield] keyword[from] identifier[self] . identifier[_get_balances] ()
identifier[balances_len] = identifier[len] ( identifier[balances] )
identifier[balance_id] = literal[int]
keyword[for] identifier[contract] , identifier[contract_url] keyword[in] identifier[contracts] . identifier[items] ():
keyword[if] identifier[contract_url] :
keyword[yield] keyword[from] identifier[self] . identifier[_load_contract_page] ( identifier[contract_url] )
keyword[try] :
identifier[yesterday] = identifier[datetime] . identifier[datetime] . identifier[now] ( identifier[HQ_TIMEZONE] )- identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
identifier[day_date] = identifier[yesterday] . identifier[strftime] ( literal[string] )
identifier[hourly_data] = keyword[yield] keyword[from] identifier[self] . identifier[_get_hourly_data] ( identifier[day_date] , identifier[p_p_id] )
identifier[hourly_data] = identifier[hourly_data] [ literal[string] ]
keyword[except] identifier[Exception] :
identifier[hourly_data] ={}
keyword[try] :
identifier[annual_data] = keyword[yield] keyword[from] identifier[self] . identifier[_get_annual_data] ( identifier[p_p_id] )
keyword[except] identifier[PyHydroQuebecAnnualError] :
identifier[annual_data] ={}
identifier[monthly_data] = keyword[yield] keyword[from] identifier[self] . identifier[_get_monthly_data] ( identifier[p_p_id] )
identifier[monthly_data] = identifier[monthly_data] [ literal[int] ]
identifier[start_date] = identifier[monthly_data] . identifier[get] ( literal[string] )
identifier[end_date] = identifier[monthly_data] . identifier[get] ( literal[string] )
keyword[try] :
identifier[daily_data] = keyword[yield] keyword[from] identifier[self] . identifier[_get_daily_data] ( identifier[p_p_id] , identifier[start_date] , identifier[end_date] )
keyword[except] identifier[Exception] :
identifier[daily_data] =[]
keyword[if] identifier[daily_data] :
identifier[daily_data] = identifier[daily_data] [ literal[int] ][ literal[string] ]
identifier[contract_data] ={ literal[string] : identifier[balances] [ identifier[balance_id] ]}
keyword[for] identifier[key1] , identifier[key2] keyword[in] identifier[MONTHLY_MAP] :
identifier[contract_data] [ identifier[key1] ]= identifier[monthly_data] [ identifier[key2] ]
keyword[for] identifier[key1] , identifier[key2] keyword[in] identifier[ANNUAL_MAP] :
identifier[contract_data] [ identifier[key1] ]= identifier[annual_data] . identifier[get] ( identifier[key2] , literal[string] )
keyword[if] identifier[daily_data] :
keyword[for] identifier[key1] , identifier[key2] keyword[in] identifier[DAILY_MAP] :
identifier[contract_data] [ identifier[key1] ]= identifier[daily_data] [ identifier[key2] ]
keyword[if] identifier[hourly_data] :
identifier[contract_data] [ literal[string] ]= identifier[hourly_data]
identifier[self] . identifier[_data] [ identifier[contract] ]= identifier[contract_data]
identifier[balance_count] = identifier[balance_id] + literal[int]
keyword[if] identifier[balance_count] < identifier[balances_len] :
identifier[balance_id] += literal[int]
|
def fetch_data(self):
"""Get the latest data from HydroQuebec."""
# Get http session
yield from self._get_httpsession()
# Get login page
login_url = (yield from self._get_login_page())
# Post login page
yield from self._post_login_page(login_url)
# Get p_p_id and contracts
(p_p_id, contracts) = (yield from self._get_p_p_id_and_contract())
# If we don't have any contrats that means we have only
# onecontract. Let's get it
if contracts == {}:
contracts = (yield from self._get_lonely_contract()) # depends on [control=['if'], data=['contracts']]
# Get balance
balances = (yield from self._get_balances())
balances_len = len(balances)
balance_id = 0
# For all contracts
for (contract, contract_url) in contracts.items():
if contract_url:
yield from self._load_contract_page(contract_url) # depends on [control=['if'], data=[]]
# Get Hourly data
try:
yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
day_date = yesterday.strftime('%Y-%m-%d')
hourly_data = (yield from self._get_hourly_data(day_date, p_p_id))
hourly_data = hourly_data['processed_hourly_data'] # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=W0703
# We don't have hourly data for some reason
hourly_data = {} # depends on [control=['except'], data=[]]
# Get Annual data
try:
annual_data = (yield from self._get_annual_data(p_p_id)) # depends on [control=['try'], data=[]]
except PyHydroQuebecAnnualError:
# We don't have annual data, which is possible if your
# contract is younger than 1 year
annual_data = {} # depends on [control=['except'], data=[]]
# Get Monthly data
monthly_data = (yield from self._get_monthly_data(p_p_id))
monthly_data = monthly_data[0]
# Get daily data
start_date = monthly_data.get('dateDebutPeriode')
end_date = monthly_data.get('dateFinPeriode')
try:
daily_data = (yield from self._get_daily_data(p_p_id, start_date, end_date)) # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=W0703
daily_data = [] # depends on [control=['except'], data=[]]
# We have to test daily_data because it's empty
# At the end/starts of a period
if daily_data:
daily_data = daily_data[0]['courant'] # depends on [control=['if'], data=[]]
# format data
contract_data = {'balance': balances[balance_id]}
for (key1, key2) in MONTHLY_MAP:
contract_data[key1] = monthly_data[key2] # depends on [control=['for'], data=[]]
for (key1, key2) in ANNUAL_MAP:
contract_data[key1] = annual_data.get(key2, '') # depends on [control=['for'], data=[]]
# We have to test daily_data because it's empty
# At the end/starts of a period
if daily_data:
for (key1, key2) in DAILY_MAP:
contract_data[key1] = daily_data[key2] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Hourly
if hourly_data:
contract_data['yesterday_hourly_consumption'] = hourly_data # depends on [control=['if'], data=[]]
# Add contract
self._data[contract] = contract_data
balance_count = balance_id + 1
if balance_count < balances_len:
balance_id += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def eqy(ql, qs, ns=None):
    """
    *New in pywbem 0.12*

    Thin wrapper around :meth:`~pywbem.WBEMConnection.ExecQuery` on the
    module-level connection: execute a query in a namespace.

    Parameters:

      ql (:term:`string`):
        Name of the query language used in the `qs` parameter, e.g.
        "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
        Language. Because this is not a filter query, "DMTF:FQL" is not a
        valid query language for this request.

      qs (:term:`string`):
        Query string in the query language specified in the `ql` parameter.

      ns (:term:`string`):
        Name of the CIM namespace to be used (case independent).
        If `None`, defaults to the default namespace of the connection.

    Returns:

      A list of :class:`~pywbem.CIMInstance` objects representing the
      query result. These instances have their `path` attribute set to
      identify their creation class and the target namespace of the
      query, but they are not addressable instances.
    """  # noqa: E501
    return CONN.ExecQuery(QueryLanguage=ql, Query=qs, namespace=ns)
|
def function[eqy, parameter[ql, qs, ns]]:
constant[
*New in pywbem 0.12*
This function is a wrapper for :meth:`~pywbem.WBEMConnection.ExecQuery`.
Execute a query in a namespace.
Parameters:
ql (:term:`string`):
Name of the query language used in the `qs` parameter, e.g.
"DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
Language. Because this is not a filter query, "DMTF:FQL" is not a
valid query language for this request.
qs (:term:`string`):
Query string in the query language specified in the `ql` parameter.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the default namespace of the connection.
Returns:
A list of :class:`~pywbem.CIMInstance` objects that represents
the query result.
These instances have their `path` attribute set to identify
their creation class and the target namespace of the query, but
they are not addressable instances.
]
return[call[name[CONN].ExecQuery, parameter[]]]
|
keyword[def] identifier[eqy] ( identifier[ql] , identifier[qs] , identifier[ns] = keyword[None] ,):
literal[string]
keyword[return] identifier[CONN] . identifier[ExecQuery] ( identifier[QueryLanguage] = identifier[ql] ,
identifier[Query] = identifier[qs] ,
identifier[namespace] = identifier[ns] )
|
def eqy(ql, qs, ns=None):
"""
*New in pywbem 0.12*
This function is a wrapper for :meth:`~pywbem.WBEMConnection.ExecQuery`.
Execute a query in a namespace.
Parameters:
ql (:term:`string`):
Name of the query language used in the `qs` parameter, e.g.
"DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
Language. Because this is not a filter query, "DMTF:FQL" is not a
valid query language for this request.
qs (:term:`string`):
Query string in the query language specified in the `ql` parameter.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the default namespace of the connection.
Returns:
A list of :class:`~pywbem.CIMInstance` objects that represents
the query result.
These instances have their `path` attribute set to identify
their creation class and the target namespace of the query, but
they are not addressable instances.
""" # noqa: E501
return CONN.ExecQuery(QueryLanguage=ql, Query=qs, namespace=ns)
|
def open (filename, mode='r', **options):
    """Returns an instance of a :class:`PCapStream` class which contains
    the ``read()``, ``write()``, and ``close()`` methods.  Binary mode
    is assumed for this module, so the "b" is not required when
    calling ``open()``.

    If the optional ``rollover`` parameter is True, a
    :class:`PCapRolloverStream` is created instead.  In that case
    ``filename`` is treated as a ``strftime(3)`` format string and
    ``nbytes``, ``npackets``, ``nseconds``, and ``dryrun`` parameters
    may also be specified.  See :class:``PCapRolloverStream`` for more
    information.

    NOTE: :class:`PCapRolloverStream` is always opened in write mode
    ("wb") and supports only ``write()`` and ``close()``, not
    ``read()``.
    """
    # PCAP data is always binary: strip any existing "b" and append one.
    mode = mode.replace('b', '') + 'b'

    if not options.get('rollover', False):
        return PCapStream( __builtin__.open(filename, mode), mode )

    return PCapRolloverStream(
        filename,
        options.get('nbytes'  , None),
        options.get('npackets', None),
        options.get('nseconds', None),
        options.get('dryrun'  , False),
    )
|
def function[open, parameter[filename, mode]]:
constant[Returns an instance of a :class:`PCapStream` class which contains
the ``read()``, ``write()``, and ``close()`` methods. Binary mode
is assumed for this module, so the "b" is not required when
calling ``open()``.
If the optiontal ``rollover`` parameter is True, a
:class:`PCapRolloverStream` is created instead. In that case
``filename`` is treated as a ``strftime(3)`` format string and
``nbytes``, ``npackets``, ``nseconds``, and ``dryrun`` parameters
may also be specified. See :class:``PCapRolloverStream`` for more
information.
NOTE: :class:`PCapRolloverStream` is always opened in write mode
("wb") and supports only ``write()`` and ``close()``, not
``read()``.
]
variable[mode] assign[=] binary_operation[call[name[mode].replace, parameter[constant[b], constant[]]] + constant[b]]
if call[name[options].get, parameter[constant[rollover], constant[False]]] begin[:]
variable[stream] assign[=] call[name[PCapRolloverStream], parameter[name[filename], call[name[options].get, parameter[constant[nbytes], constant[None]]], call[name[options].get, parameter[constant[npackets], constant[None]]], call[name[options].get, parameter[constant[nseconds], constant[None]]], call[name[options].get, parameter[constant[dryrun], constant[False]]]]]
return[name[stream]]
|
keyword[def] identifier[open] ( identifier[filename] , identifier[mode] = literal[string] ,** identifier[options] ):
literal[string]
identifier[mode] = identifier[mode] . identifier[replace] ( literal[string] , literal[string] )+ literal[string]
keyword[if] identifier[options] . identifier[get] ( literal[string] , keyword[False] ):
identifier[stream] = identifier[PCapRolloverStream] ( identifier[filename] ,
identifier[options] . identifier[get] ( literal[string] , keyword[None] ),
identifier[options] . identifier[get] ( literal[string] , keyword[None] ),
identifier[options] . identifier[get] ( literal[string] , keyword[None] ),
identifier[options] . identifier[get] ( literal[string] , keyword[False] ))
keyword[else] :
identifier[stream] = identifier[PCapStream] ( identifier[__builtin__] . identifier[open] ( identifier[filename] , identifier[mode] ), identifier[mode] )
keyword[return] identifier[stream]
|
def open(filename, mode='r', **options):
"""Returns an instance of a :class:`PCapStream` class which contains
the ``read()``, ``write()``, and ``close()`` methods. Binary mode
is assumed for this module, so the "b" is not required when
calling ``open()``.
If the optiontal ``rollover`` parameter is True, a
:class:`PCapRolloverStream` is created instead. In that case
``filename`` is treated as a ``strftime(3)`` format string and
``nbytes``, ``npackets``, ``nseconds``, and ``dryrun`` parameters
may also be specified. See :class:``PCapRolloverStream`` for more
information.
NOTE: :class:`PCapRolloverStream` is always opened in write mode
("wb") and supports only ``write()`` and ``close()``, not
``read()``.
"""
mode = mode.replace('b', '') + 'b'
if options.get('rollover', False):
stream = PCapRolloverStream(filename, options.get('nbytes', None), options.get('npackets', None), options.get('nseconds', None), options.get('dryrun', False)) # depends on [control=['if'], data=[]]
else:
stream = PCapStream(__builtin__.open(filename, mode), mode)
return stream
|
def stop(self):
    """
    Stops an OpenTok archive that is being recorded.

    Archives automatically stop recording after 120 minutes or when all
    clients have disconnected from the session being archived.
    """
    stopped = self.sdk.stop_archive(self.id)
    # Mirror the refreshed archive attributes onto this instance.
    for name, value in iteritems(stopped.attrs()):
        setattr(self, name, value)
|
def function[stop, parameter[self]]:
constant[
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 120 minutes or when all clients have
disconnected from the session being archived.
]
variable[temp_archive] assign[=] call[name[self].sdk.stop_archive, parameter[name[self].id]]
for taget[tuple[[<ast.Name object at 0x7da1b031c2e0>, <ast.Name object at 0x7da1b031de40>]]] in starred[call[name[iteritems], parameter[call[name[temp_archive].attrs, parameter[]]]]] begin[:]
call[name[setattr], parameter[name[self], name[k], name[v]]]
|
keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
identifier[temp_archive] = identifier[self] . identifier[sdk] . identifier[stop_archive] ( identifier[self] . identifier[id] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iteritems] ( identifier[temp_archive] . identifier[attrs] ()):
identifier[setattr] ( identifier[self] , identifier[k] , identifier[v] )
|
def stop(self):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 120 minutes or when all clients have
disconnected from the session being archived.
"""
temp_archive = self.sdk.stop_archive(self.id)
for (k, v) in iteritems(temp_archive.attrs()):
setattr(self, k, v) # depends on [control=['for'], data=[]]
|
def mode_challenge_response(self, secret, type='HMAC', variable=True, require_button=False):
    """
    Set the YubiKey up for challenge-response operation.

    `type' can be 'HMAC' or 'OTP'.
    `variable' is only applicable to type 'HMAC'.
    For type HMAC, `secret' is expected to be 20 bytes (160 bits).
    For type OTP, `secret' is expected to be 16 bytes (128 bits).
    Requires YubiKey 2.2.
    """
    mode = type.upper()
    if mode not in ('HMAC', 'OTP'):
        raise yubico_exception.InputError('Invalid \'type\' (%s)' % type)
    if not self.capabilities.have_challenge_response(mode):
        raise yubikey_base.YubiKeyVersionError(
            '%s Challenge-Response not available in %s version %d.%d'
            % (mode, self.capabilities.model, self.ykver[0], self.ykver[1]))
    self._change_mode('CHAL_RESP', major=2, minor=2)
    if mode == 'HMAC':
        self.config_flag('CHAL_HMAC', True)
        # HMAC_LT64 enables variable-length (< 64 byte) challenges.
        self.config_flag('HMAC_LT64', variable)
        self._set_20_bytes_key(secret)
    else:
        # mode is 'OTP', validated above
        self.config_flag('CHAL_YUBICO', True)
        self.aes_key(secret)
    self.config_flag('CHAL_BTN_TRIG', require_button)
|
def function[mode_challenge_response, parameter[self, secret, type, variable, require_button]]:
constant[
Set the YubiKey up for challenge-response operation.
`type' can be 'HMAC' or 'OTP'.
`variable' is only applicable to type 'HMAC'.
For type HMAC, `secret' is expected to be 20 bytes (160 bits).
For type OTP, `secret' is expected to be 16 bytes (128 bits).
Requires YubiKey 2.2.
]
if <ast.UnaryOp object at 0x7da1b08da9b0> begin[:]
<ast.Raise object at 0x7da1b08da6b0>
if <ast.UnaryOp object at 0x7da1b08dbe50> begin[:]
<ast.Raise object at 0x7da1b08db7c0>
call[name[self]._change_mode, parameter[constant[CHAL_RESP]]]
if compare[call[name[type].upper, parameter[]] equal[==] constant[HMAC]] begin[:]
call[name[self].config_flag, parameter[constant[CHAL_HMAC], constant[True]]]
call[name[self].config_flag, parameter[constant[HMAC_LT64], name[variable]]]
call[name[self]._set_20_bytes_key, parameter[name[secret]]]
call[name[self].config_flag, parameter[constant[CHAL_BTN_TRIG], name[require_button]]]
|
keyword[def] identifier[mode_challenge_response] ( identifier[self] , identifier[secret] , identifier[type] = literal[string] , identifier[variable] = keyword[True] , identifier[require_button] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[type] . identifier[upper] () keyword[in] [ literal[string] , literal[string] ]:
keyword[raise] identifier[yubico_exception] . identifier[InputError] ( literal[string] % identifier[type] )
keyword[if] keyword[not] identifier[self] . identifier[capabilities] . identifier[have_challenge_response] ( identifier[type] . identifier[upper] ()):
keyword[raise] identifier[yubikey_base] . identifier[YubiKeyVersionError] ( literal[string] %( identifier[type] . identifier[upper] (), identifier[self] . identifier[capabilities] . identifier[model] , identifier[self] . identifier[ykver] [ literal[int] ], identifier[self] . identifier[ykver] [ literal[int] ]))
identifier[self] . identifier[_change_mode] ( literal[string] , identifier[major] = literal[int] , identifier[minor] = literal[int] )
keyword[if] identifier[type] . identifier[upper] ()== literal[string] :
identifier[self] . identifier[config_flag] ( literal[string] , keyword[True] )
identifier[self] . identifier[config_flag] ( literal[string] , identifier[variable] )
identifier[self] . identifier[_set_20_bytes_key] ( identifier[secret] )
keyword[else] :
identifier[self] . identifier[config_flag] ( literal[string] , keyword[True] )
identifier[self] . identifier[aes_key] ( identifier[secret] )
identifier[self] . identifier[config_flag] ( literal[string] , identifier[require_button] )
|
def mode_challenge_response(self, secret, type='HMAC', variable=True, require_button=False):
"""
Set the YubiKey up for challenge-response operation.
`type' can be 'HMAC' or 'OTP'.
`variable' is only applicable to type 'HMAC'.
For type HMAC, `secret' is expected to be 20 bytes (160 bits).
For type OTP, `secret' is expected to be 16 bytes (128 bits).
Requires YubiKey 2.2.
"""
if not type.upper() in ['HMAC', 'OTP']:
raise yubico_exception.InputError("Invalid 'type' (%s)" % type) # depends on [control=['if'], data=[]]
if not self.capabilities.have_challenge_response(type.upper()):
raise yubikey_base.YubiKeyVersionError('%s Challenge-Response not available in %s version %d.%d' % (type.upper(), self.capabilities.model, self.ykver[0], self.ykver[1])) # depends on [control=['if'], data=[]]
self._change_mode('CHAL_RESP', major=2, minor=2)
if type.upper() == 'HMAC':
self.config_flag('CHAL_HMAC', True)
self.config_flag('HMAC_LT64', variable)
self._set_20_bytes_key(secret) # depends on [control=['if'], data=[]]
else:
# type is 'OTP', checked above
self.config_flag('CHAL_YUBICO', True)
self.aes_key(secret)
self.config_flag('CHAL_BTN_TRIG', require_button)
|
def decompress(compressed_data):
    """Decompress data that has been compressed by the filepack algorithm.

    :param compressed_data: an array of compressed data bytes to decompress
    :rtype: an array of decompressed bytes"""
    raw_data = []
    index = 0
    while index < len(compressed_data):
        current = compressed_data[index]
        index += 1
        if current == RLE_BYTE:
            directive = compressed_data[index]
            index += 1
            if directive == RLE_BYTE:
                # A doubled RLE marker escapes a literal RLE_BYTE.
                raw_data.append(RLE_BYTE)
            else:
                # Run-length encoding: <value> <count>.
                count = compressed_data[index]
                index += 1
                raw_data.extend([directive] * count)
        elif current == SPECIAL_BYTE:
            directive = compressed_data[index]
            index += 1
            if directive == SPECIAL_BYTE:
                # A doubled special marker escapes a literal SPECIAL_BYTE.
                raw_data.append(SPECIAL_BYTE)
            elif directive == DEFAULT_WAVE_BYTE:
                count = compressed_data[index]
                index += 1
                raw_data.extend(DEFAULT_WAVE * count)
            elif directive == DEFAULT_INSTR_BYTE:
                count = compressed_data[index]
                index += 1
                raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)
            elif directive == EOF_BYTE:
                assert False, ("Unexpected EOF command encountered while "
                               "decompressing")
            else:
                # Fixed typo in the message: "Countered" -> "Encountered".
                assert False, "Encountered unexpected sequence 0x%02x 0x%02x" % (
                    current, directive)
        else:
            # Plain literal byte.
            raw_data.append(current)
    return raw_data
|
def function[decompress, parameter[compressed_data]]:
constant[Decompress data that has been compressed by the filepack algorithm.
:param compressed_data: an array of compressed data bytes to decompress
:rtype: an array of decompressed bytes]
variable[raw_data] assign[=] list[[]]
variable[index] assign[=] constant[0]
while compare[name[index] less[<] call[name[len], parameter[name[compressed_data]]]] begin[:]
variable[current] assign[=] call[name[compressed_data]][name[index]]
<ast.AugAssign object at 0x7da1b0f535e0>
if compare[name[current] equal[==] name[RLE_BYTE]] begin[:]
variable[directive] assign[=] call[name[compressed_data]][name[index]]
<ast.AugAssign object at 0x7da1b0f504f0>
if compare[name[directive] equal[==] name[RLE_BYTE]] begin[:]
call[name[raw_data].append, parameter[name[RLE_BYTE]]]
return[name[raw_data]]
|
keyword[def] identifier[decompress] ( identifier[compressed_data] ):
literal[string]
identifier[raw_data] =[]
identifier[index] = literal[int]
keyword[while] identifier[index] < identifier[len] ( identifier[compressed_data] ):
identifier[current] = identifier[compressed_data] [ identifier[index] ]
identifier[index] += literal[int]
keyword[if] identifier[current] == identifier[RLE_BYTE] :
identifier[directive] = identifier[compressed_data] [ identifier[index] ]
identifier[index] += literal[int]
keyword[if] identifier[directive] == identifier[RLE_BYTE] :
identifier[raw_data] . identifier[append] ( identifier[RLE_BYTE] )
keyword[else] :
identifier[count] = identifier[compressed_data] [ identifier[index] ]
identifier[index] += literal[int]
identifier[raw_data] . identifier[extend] ([ identifier[directive] ]* identifier[count] )
keyword[elif] identifier[current] == identifier[SPECIAL_BYTE] :
identifier[directive] = identifier[compressed_data] [ identifier[index] ]
identifier[index] += literal[int]
keyword[if] identifier[directive] == identifier[SPECIAL_BYTE] :
identifier[raw_data] . identifier[append] ( identifier[SPECIAL_BYTE] )
keyword[elif] identifier[directive] == identifier[DEFAULT_WAVE_BYTE] :
identifier[count] = identifier[compressed_data] [ identifier[index] ]
identifier[index] += literal[int]
identifier[raw_data] . identifier[extend] ( identifier[DEFAULT_WAVE] * identifier[count] )
keyword[elif] identifier[directive] == identifier[DEFAULT_INSTR_BYTE] :
identifier[count] = identifier[compressed_data] [ identifier[index] ]
identifier[index] += literal[int]
identifier[raw_data] . identifier[extend] ( identifier[DEFAULT_INSTRUMENT_FILEPACK] * identifier[count] )
keyword[elif] identifier[directive] == identifier[EOF_BYTE] :
keyword[assert] keyword[False] ,( literal[string]
literal[string] )
keyword[else] :
keyword[assert] keyword[False] , literal[string] %(
identifier[current] , identifier[directive] )
keyword[else] :
identifier[raw_data] . identifier[append] ( identifier[current] )
keyword[return] identifier[raw_data]
|
def decompress(compressed_data):
"""Decompress data that has been compressed by the filepack algorithm.
:param compressed_data: an array of compressed data bytes to decompress
:rtype: an array of decompressed bytes"""
raw_data = []
index = 0
while index < len(compressed_data):
current = compressed_data[index]
index += 1
if current == RLE_BYTE:
directive = compressed_data[index]
index += 1
if directive == RLE_BYTE:
raw_data.append(RLE_BYTE) # depends on [control=['if'], data=['RLE_BYTE']]
else:
count = compressed_data[index]
index += 1
raw_data.extend([directive] * count) # depends on [control=['if'], data=['RLE_BYTE']]
elif current == SPECIAL_BYTE:
directive = compressed_data[index]
index += 1
if directive == SPECIAL_BYTE:
raw_data.append(SPECIAL_BYTE) # depends on [control=['if'], data=['SPECIAL_BYTE']]
elif directive == DEFAULT_WAVE_BYTE:
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_WAVE * count) # depends on [control=['if'], data=[]]
elif directive == DEFAULT_INSTR_BYTE:
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count) # depends on [control=['if'], data=[]]
elif directive == EOF_BYTE:
assert False, 'Unexpected EOF command encountered while decompressing' # depends on [control=['if'], data=[]]
else:
assert False, 'Countered unexpected sequence 0x%02x 0x%02x' % (current, directive) # depends on [control=['if'], data=['current', 'SPECIAL_BYTE']]
else:
raw_data.append(current) # depends on [control=['while'], data=['index']]
return raw_data
|
def get_resource_url(resource):
    """
    Returns the URL for the given resource.
    """
    # Blank out the netloc component so the URL is host-relative.
    parts = list(urlparse.urlparse(model_path(resource)))
    parts[1] = ""
    return urlparse.urlunparse(parts)
|
def function[get_resource_url, parameter[resource]]:
constant[
Returns the URL for the given resource.
]
variable[path] assign[=] call[name[model_path], parameter[name[resource]]]
variable[parsed] assign[=] call[name[list], parameter[call[name[urlparse].urlparse, parameter[name[path]]]]]
call[name[parsed]][constant[1]] assign[=] constant[]
return[call[name[urlparse].urlunparse, parameter[name[parsed]]]]
|
keyword[def] identifier[get_resource_url] ( identifier[resource] ):
literal[string]
identifier[path] = identifier[model_path] ( identifier[resource] )
identifier[parsed] = identifier[list] ( identifier[urlparse] . identifier[urlparse] ( identifier[path] ))
identifier[parsed] [ literal[int] ]= literal[string]
keyword[return] identifier[urlparse] . identifier[urlunparse] ( identifier[parsed] )
|
def get_resource_url(resource):
"""
Returns the URL for the given resource.
"""
path = model_path(resource)
parsed = list(urlparse.urlparse(path))
parsed[1] = ''
return urlparse.urlunparse(parsed)
|
def deserialize(self, value, **kwargs):
    """Deserialize instance from JSON value

    If a deserializer is registered, that is used. Otherwise, if the
    instance_class is a HasProperties subclass, an instance can be
    deserialized from a dictionary.
    """
    # Untrusted by default unless the caller says otherwise.
    kwargs.setdefault('trusted', False)
    if self.deserializer is not None:
        return self.deserializer(value, **kwargs)
    if value is None:
        return None
    if isinstance(value, string_types):
        return value
    if issubclass(self.instance_class, base.HasProperties):
        return self.instance_class.deserialize(value, **kwargs)
    return self.from_json(value, **kwargs)
|
def function[deserialize, parameter[self, value]]:
constant[Deserialize instance from JSON value
If a deserializer is registered, that is used. Otherwise, if the
instance_class is a HasProperties subclass, an instance can be
deserialized from a dictionary.
]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da1b0432920>], [<ast.Call object at 0x7da1b0430040>]]]]
if compare[name[self].deserializer is_not constant[None]] begin[:]
return[call[name[self].deserializer, parameter[name[value]]]]
if compare[name[value] is constant[None]] begin[:]
return[constant[None]]
if call[name[isinstance], parameter[name[value], name[string_types]]] begin[:]
return[name[value]]
if call[name[issubclass], parameter[name[self].instance_class, name[base].HasProperties]] begin[:]
return[call[name[self].instance_class.deserialize, parameter[name[value]]]]
return[call[name[self].from_json, parameter[name[value]]]]
|
keyword[def] identifier[deserialize] ( identifier[self] , identifier[value] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] . identifier[update] ({ literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )})
keyword[if] identifier[self] . identifier[deserializer] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[deserializer] ( identifier[value] ,** identifier[kwargs] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[string_types] ):
keyword[return] identifier[value]
keyword[if] identifier[issubclass] ( identifier[self] . identifier[instance_class] , identifier[base] . identifier[HasProperties] ):
keyword[return] identifier[self] . identifier[instance_class] . identifier[deserialize] ( identifier[value] ,** identifier[kwargs] )
keyword[return] identifier[self] . identifier[from_json] ( identifier[value] ,** identifier[kwargs] )
|
def deserialize(self, value, **kwargs):
"""Deserialize instance from JSON value
If a deserializer is registered, that is used. Otherwise, if the
instance_class is a HasProperties subclass, an instance can be
deserialized from a dictionary.
"""
kwargs.update({'trusted': kwargs.get('trusted', False)})
if self.deserializer is not None:
return self.deserializer(value, **kwargs) # depends on [control=['if'], data=[]]
if value is None:
return None # depends on [control=['if'], data=[]]
if isinstance(value, string_types):
return value # depends on [control=['if'], data=[]]
if issubclass(self.instance_class, base.HasProperties):
return self.instance_class.deserialize(value, **kwargs) # depends on [control=['if'], data=[]]
return self.from_json(value, **kwargs)
|
def _post_filter(search, urlkwargs, definitions):
    """Ingest post filter in query."""
    dsl_filters, urlkwargs = _create_filter_dsl(urlkwargs, definitions)
    # Chain every filter onto the search as a post-filter.
    for dsl_filter in dsl_filters:
        search = search.post_filter(dsl_filter)
    return search, urlkwargs
|
def function[_post_filter, parameter[search, urlkwargs, definitions]]:
constant[Ingest post filter in query.]
<ast.Tuple object at 0x7da20c993820> assign[=] call[name[_create_filter_dsl], parameter[name[urlkwargs], name[definitions]]]
for taget[name[filter_]] in starred[name[filters]] begin[:]
variable[search] assign[=] call[name[search].post_filter, parameter[name[filter_]]]
return[tuple[[<ast.Name object at 0x7da1b03425c0>, <ast.Name object at 0x7da1b0341cc0>]]]
|
keyword[def] identifier[_post_filter] ( identifier[search] , identifier[urlkwargs] , identifier[definitions] ):
literal[string]
identifier[filters] , identifier[urlkwargs] = identifier[_create_filter_dsl] ( identifier[urlkwargs] , identifier[definitions] )
keyword[for] identifier[filter_] keyword[in] identifier[filters] :
identifier[search] = identifier[search] . identifier[post_filter] ( identifier[filter_] )
keyword[return] ( identifier[search] , identifier[urlkwargs] )
|
def _post_filter(search, urlkwargs, definitions):
"""Ingest post filter in query."""
(filters, urlkwargs) = _create_filter_dsl(urlkwargs, definitions)
for filter_ in filters:
search = search.post_filter(filter_) # depends on [control=['for'], data=['filter_']]
return (search, urlkwargs)
|
def retrieve_taf(station_icao) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]:
    """
    Retrieves a TAF string from an online database

    Args:
        station_icao: ICAO of the station

    Returns:
        tuple of error, metar_str
    """
    url = _BASE_TAF_URL.format(station=station_icao)
    with requests.get(url) as resp:
        if not resp.ok:
            # Fixed typo in the user-facing message: "Got to" -> "Go to".
            return f'unable to obtain TAF for station {station_icao}\n' \
                   f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \
                   f'for a list of valid stations', None
        # First line of the response is a timestamp; the TAF text is on line 2.
        return None, resp.content.decode().split('\n')[1]
|
def function[retrieve_taf, parameter[station_icao]]:
constant[
Retrieves a TAF string from an online database
Args:
station_icao: ICAO of the station
Returns:
tuple of error, metar_str
]
variable[url] assign[=] call[name[_BASE_TAF_URL].format, parameter[]]
with call[name[requests].get, parameter[name[url]]] begin[:]
if <ast.UnaryOp object at 0x7da1b15952a0> begin[:]
return[tuple[[<ast.JoinedStr object at 0x7da1b1596830>, <ast.Constant object at 0x7da1b1597be0>]]]
return[tuple[[<ast.Constant object at 0x7da1b15956f0>, <ast.Subscript object at 0x7da1b15968c0>]]]
|
keyword[def] identifier[retrieve_taf] ( identifier[station_icao] )-> identifier[typing] . identifier[Tuple] [ identifier[typing] . identifier[Union] [ identifier[str] , keyword[None] ], identifier[typing] . identifier[Union] [ identifier[str] , keyword[None] ]]:
literal[string]
identifier[url] = identifier[_BASE_TAF_URL] . identifier[format] ( identifier[station] = identifier[station_icao] )
keyword[with] identifier[requests] . identifier[get] ( identifier[url] ) keyword[as] identifier[resp] :
keyword[if] keyword[not] identifier[resp] . identifier[ok] :
keyword[return] literal[string] literal[string] literal[string] , keyword[None]
keyword[return] keyword[None] , identifier[resp] . identifier[content] . identifier[decode] (). identifier[split] ( literal[string] )[ literal[int] ]
|
def retrieve_taf(station_icao) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]:
"""
Retrieves a TAF string from an online database
Args:
station_icao: ICAO of the station
Returns:
tuple of error, metar_str
"""
url = _BASE_TAF_URL.format(station=station_icao)
with requests.get(url) as resp:
if not resp.ok:
return (f'unable to obtain TAF for station {station_icao}\nGot to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" for a list of valid stations', None) # depends on [control=['if'], data=[]]
return (None, resp.content.decode().split('\n')[1]) # depends on [control=['with'], data=['resp']]
|
def herz_me(val):
    """Return integer value for Hz, translated from (MHz|kHz|Hz)."""
    # Longer suffixes must be tried first, since "MHz" and "kHz"
    # also end with "Hz". Unrecognised input yields 0.
    for suffix, factor in (("MHz", 1000000), ("kHz", 1000), ("Hz", 1)):
        if val.endswith(suffix):
            return float(val.replace(suffix, "")) * factor
    return 0
|
def function[herz_me, parameter[val]]:
constant[Return integer value for Hz, translated from (MHz|kHz|Hz).]
variable[result] assign[=] constant[0]
if call[name[val].endswith, parameter[constant[MHz]]] begin[:]
variable[stripped] assign[=] call[name[val].replace, parameter[constant[MHz], constant[]]]
variable[strip_fl] assign[=] call[name[float], parameter[name[stripped]]]
variable[result] assign[=] binary_operation[name[strip_fl] * constant[1000000]]
return[name[result]]
|
keyword[def] identifier[herz_me] ( identifier[val] ):
literal[string]
identifier[result] = literal[int]
keyword[if] identifier[val] . identifier[endswith] ( literal[string] ):
identifier[stripped] = identifier[val] . identifier[replace] ( literal[string] , literal[string] )
identifier[strip_fl] = identifier[float] ( identifier[stripped] )
identifier[result] = identifier[strip_fl] * literal[int]
keyword[elif] identifier[val] . identifier[endswith] ( literal[string] ):
identifier[stripped] = identifier[val] . identifier[replace] ( literal[string] , literal[string] )
identifier[strip_fl] = identifier[float] ( identifier[stripped] )
identifier[result] = identifier[strip_fl] * literal[int]
keyword[elif] identifier[val] . identifier[endswith] ( literal[string] ):
identifier[stripped] = identifier[val] . identifier[replace] ( literal[string] , literal[string] )
identifier[result] = identifier[float] ( identifier[stripped] )
keyword[return] ( identifier[result] )
|
def herz_me(val):
"""Return integer value for Hz, translated from (MHz|kHz|Hz)."""
result = 0
if val.endswith('MHz'):
stripped = val.replace('MHz', '')
strip_fl = float(stripped)
result = strip_fl * 1000000 # depends on [control=['if'], data=[]]
elif val.endswith('kHz'):
stripped = val.replace('kHz', '')
strip_fl = float(stripped)
result = strip_fl * 1000 # depends on [control=['if'], data=[]]
elif val.endswith('Hz'):
stripped = val.replace('Hz', '')
result = float(stripped) # depends on [control=['if'], data=[]]
return result
|
def _load(self, titles=(), descriptions=(), images=(), urls=(), **kwargs):
    """
    Loads extracted data into Summary.

    Performs validation and filtering on-the-fly, and sets the
    non-plural fields to the best specific item so far.
    If GET_ALL_DATA is False, it gets only the first valid item.

    Fixed: mutable default arguments (`=[]`) replaced with immutable
    tuples; the parameters are only iterated, never mutated.
    """
    # Placeholder cap check; historically compared len(items) >= MAX_ITEMS.
    enough = lambda items: items
    if config.GET_ALL_DATA or not enough(self.titles):
        self.titles.extend(t for t in map(self._clean_text, titles) if t)
    if config.GET_ALL_DATA or not enough(self.descriptions):
        self.descriptions.extend(
            d for d in map(self._clean_text, descriptions) if d)
    # URLs are deliberately not collected here; they were too often invalid
    # to be worth validating (see the removed _clean_url handling).
    if config.GET_ALL_DATA:
        self.images.extend(i for i in map(self._filter_image, images) if i)
    elif not enough(self.images):
        # Stop at the first valid image(s) instead of filtering them all.
        for candidate in images:
            image = self._filter_image(candidate)
            if image:
                self.images.append(image)
                if enough(self.images):
                    break
|
def function[_load, parameter[self, titles, descriptions, images, urls]]:
constant[
Loads extracted data into Summary.
Performs validation and filtering on-the-fly, and sets the
non-plural fields to the best specific item so far.
If GET_ALL_DATA is False, it gets only the first valid item.
]
variable[enough] assign[=] <ast.Lambda object at 0x7da1b0af1f30>
if <ast.BoolOp object at 0x7da1b0af1060> begin[:]
variable[titles] assign[=] call[name[filter], parameter[constant[None], call[name[map], parameter[name[self]._clean_text, name[titles]]]]]
call[name[self].titles.extend, parameter[name[titles]]]
if <ast.BoolOp object at 0x7da1b0af0460> begin[:]
variable[descriptions] assign[=] call[name[filter], parameter[constant[None], call[name[map], parameter[name[self]._clean_text, name[descriptions]]]]]
call[name[self].descriptions.extend, parameter[name[descriptions]]]
if name[config].GET_ALL_DATA begin[:]
variable[images] assign[=] call[name[filter], parameter[constant[None], call[name[map], parameter[name[self]._filter_image, name[images]]]]]
call[name[self].images.extend, parameter[name[images]]]
|
keyword[def] identifier[_load] ( identifier[self] , identifier[titles] =[], identifier[descriptions] =[], identifier[images] =[], identifier[urls] =[],** identifier[kwargs] ):
literal[string]
identifier[enough] = keyword[lambda] identifier[items] : identifier[items]
keyword[if] identifier[config] . identifier[GET_ALL_DATA] keyword[or] keyword[not] identifier[enough] ( identifier[self] . identifier[titles] ):
identifier[titles] = identifier[filter] ( keyword[None] , identifier[map] ( identifier[self] . identifier[_clean_text] , identifier[titles] ))
identifier[self] . identifier[titles] . identifier[extend] ( identifier[titles] )
keyword[if] identifier[config] . identifier[GET_ALL_DATA] keyword[or] keyword[not] identifier[enough] ( identifier[self] . identifier[descriptions] ):
identifier[descriptions] = identifier[filter] ( keyword[None] , identifier[map] ( identifier[self] . identifier[_clean_text] , identifier[descriptions] ))
identifier[self] . identifier[descriptions] . identifier[extend] ( identifier[descriptions] )
keyword[if] identifier[config] . identifier[GET_ALL_DATA] :
identifier[images] = identifier[filter] ( keyword[None] , identifier[map] ( identifier[self] . identifier[_filter_image] , identifier[images] ))
identifier[self] . identifier[images] . identifier[extend] ( identifier[images] )
keyword[elif] keyword[not] identifier[enough] ( identifier[self] . identifier[images] ):
keyword[for] identifier[i] keyword[in] identifier[images] :
identifier[image] = identifier[self] . identifier[_filter_image] ( identifier[i] )
keyword[if] identifier[image] :
identifier[self] . identifier[images] . identifier[append] ( identifier[image] )
keyword[if] identifier[enough] ( identifier[self] . identifier[images] ):
keyword[break]
|
def _load(self, titles=[], descriptions=[], images=[], urls=[], **kwargs):
"""
Loads extracted data into Summary.
Performs validation and filtering on-the-fly, and sets the
non-plural fields to the best specific item so far.
If GET_ALL_DATA is False, it gets only the first valid item.
"""
enough = lambda items: items # len(items) >= MAX_ITEMS
if config.GET_ALL_DATA or not enough(self.titles):
titles = filter(None, map(self._clean_text, titles))
self.titles.extend(titles) # depends on [control=['if'], data=[]]
if config.GET_ALL_DATA or not enough(self.descriptions):
descriptions = filter(None, map(self._clean_text, descriptions))
self.descriptions.extend(descriptions) # depends on [control=['if'], data=[]]
## Never mind the urls, they can be bad not worth it
# if config.GET_ALL_DATA or not enough(self.urls):
# # urls = [self._clean_url(u) for u in urls]
# urls = filter(None, map(self._clean_url, urls))
# self.urls.extend(urls)
if config.GET_ALL_DATA:
# images = [i for i in [self._filter_image(i) for i in images] if i]
images = filter(None, map(self._filter_image, images))
self.images.extend(images) # depends on [control=['if'], data=[]]
elif not enough(self.images):
for i in images:
image = self._filter_image(i)
if image:
self.images.append(image) # depends on [control=['if'], data=[]]
if enough(self.images):
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
|
def get_node_attributes(G, name):
    """Get node attributes from graph

    Parameters
    ----------
    G : DyNetx Graph

    name : string
       Attribute name

    Returns
    -------
    Dictionary of attributes keyed by node.
    """
    # Nodes lacking the attribute are simply omitted from the result.
    attributes = {}
    for node, data in G.node.items():
        if name in data:
            attributes[node] = data[name]
    return attributes
|
def function[get_node_attributes, parameter[G, name]]:
constant[Get node attributes from graph
Parameters
----------
G : DyNetx Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
]
return[<ast.DictComp object at 0x7da1b05db280>]
|
keyword[def] identifier[get_node_attributes] ( identifier[G] , identifier[name] ):
literal[string]
keyword[return] { identifier[n] : identifier[d] [ identifier[name] ] keyword[for] identifier[n] , identifier[d] keyword[in] identifier[G] . identifier[node] . identifier[items] () keyword[if] identifier[name] keyword[in] identifier[d] }
|
def get_node_attributes(G, name):
"""Get node attributes from graph
Parameters
----------
G : DyNetx Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
"""
return {n: d[name] for (n, d) in G.node.items() if name in d}
|
def plot(self, numPoints=100):
    """
    Plots the object in a 3D scatter.

    This method should be overriden when possible. This default behavior simply
    samples numPoints points from the object and plots them in a 3d scatter.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Sample numPoints locations per feature and scatter them.
    for feature in self._FEATURES:
        for _ in xrange(numPoints):
            location = tuple(self.sampleLocationFromFeature(feature))
            ax.scatter(*location, marker=".")
    for setter, label in ((ax.set_xlabel, 'X'),
                          (ax.set_ylabel, 'Y'),
                          (ax.set_zlabel, 'Z')):
        setter(label)
    plt.title("{}".format(self))
    return fig, ax
|
def function[plot, parameter[self, numPoints]]:
constant[
Plots the object in a 3D scatter.
This method should be overriden when possible. This default behavior simply
samples numPoints points from the object and plots them in a 3d scatter.
]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[ax] assign[=] call[name[fig].add_subplot, parameter[constant[111]]]
for taget[name[feature]] in starred[name[self]._FEATURES] begin[:]
for taget[name[_]] in starred[call[name[xrange], parameter[name[numPoints]]]] begin[:]
<ast.Tuple object at 0x7da1b08b9cf0> assign[=] call[name[tuple], parameter[call[name[self].sampleLocationFromFeature, parameter[name[feature]]]]]
call[name[ax].scatter, parameter[name[x], name[y], name[z]]]
call[name[ax].set_xlabel, parameter[constant[X]]]
call[name[ax].set_ylabel, parameter[constant[Y]]]
call[name[ax].set_zlabel, parameter[constant[Z]]]
call[name[plt].title, parameter[call[constant[{}].format, parameter[name[self]]]]]
return[tuple[[<ast.Name object at 0x7da1b0860bb0>, <ast.Name object at 0x7da1b086f8e0>]]]
|
keyword[def] identifier[plot] ( identifier[self] , identifier[numPoints] = literal[int] ):
literal[string]
identifier[fig] = identifier[plt] . identifier[figure] ()
identifier[ax] = identifier[fig] . identifier[add_subplot] ( literal[int] , identifier[projection] = literal[string] )
keyword[for] identifier[feature] keyword[in] identifier[self] . identifier[_FEATURES] :
keyword[for] identifier[_] keyword[in] identifier[xrange] ( identifier[numPoints] ):
identifier[x] , identifier[y] , identifier[z] = identifier[tuple] ( identifier[self] . identifier[sampleLocationFromFeature] ( identifier[feature] ))
identifier[ax] . identifier[scatter] ( identifier[x] , identifier[y] , identifier[z] , identifier[marker] = literal[string] )
identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_zlabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] . identifier[format] ( identifier[self] ))
keyword[return] identifier[fig] , identifier[ax]
|
def plot(self, numPoints=100):
"""
Plots the object in a 3D scatter.
This method should be overriden when possible. This default behavior simply
samples numPoints points from the object and plots them in a 3d scatter.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for feature in self._FEATURES:
for _ in xrange(numPoints):
(x, y, z) = tuple(self.sampleLocationFromFeature(feature))
ax.scatter(x, y, z, marker='.') # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['feature']]
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.title('{}'.format(self))
return (fig, ax)
|
def set_insn(self, insn):
    """
    Set a new raw buffer to disassemble

    :param insn: the buffer
    :type insn: string
    """
    # Cache the buffer length alongside the buffer itself.
    self.insn = insn
    self.size = len(insn)
|
def function[set_insn, parameter[self, insn]]:
constant[
Set a new raw buffer to disassemble
:param insn: the buffer
:type insn: string
]
name[self].insn assign[=] name[insn]
name[self].size assign[=] call[name[len], parameter[name[self].insn]]
|
keyword[def] identifier[set_insn] ( identifier[self] , identifier[insn] ):
literal[string]
identifier[self] . identifier[insn] = identifier[insn]
identifier[self] . identifier[size] = identifier[len] ( identifier[self] . identifier[insn] )
|
def set_insn(self, insn):
"""
Set a new raw buffer to disassemble
:param insn: the buffer
:type insn: string
"""
self.insn = insn
self.size = len(self.insn)
|
def _pcolormesh_array2d(self, array, *args, **kwargs):
    """Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
    """
    # pcolormesh wants bin *edges*, so extend each index axis by the
    # closing edge of its span (one more point than there are bins).
    xedges = numpy.concatenate((array.xindex.value, array.xspan[-1:]))
    yedges = numpy.concatenate((array.yindex.value, array.yspan[-1:]))
    # sparse/copy=False keeps the mesh memory-light; no data are duplicated
    mesh_x, mesh_y = numpy.meshgrid(xedges, yedges, copy=False, sparse=True)
    # transpose so the array's x-axis maps onto the plot's x-axis
    return self.pcolormesh(mesh_x, mesh_y, array.value.T, *args, **kwargs)
|
def function[_pcolormesh_array2d, parameter[self, array]]:
constant[Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
]
variable[x] assign[=] call[name[numpy].concatenate, parameter[tuple[[<ast.Attribute object at 0x7da18f09e6e0>, <ast.Subscript object at 0x7da18f09d330>]]]]
variable[y] assign[=] call[name[numpy].concatenate, parameter[tuple[[<ast.Attribute object at 0x7da18f09f610>, <ast.Subscript object at 0x7da18f09de70>]]]]
<ast.Tuple object at 0x7da18f09dff0> assign[=] call[name[numpy].meshgrid, parameter[name[x], name[y]]]
return[call[name[self].pcolormesh, parameter[name[xcoord], name[ycoord], name[array].value.T, <ast.Starred object at 0x7da18f09f9d0>]]]
|
keyword[def] identifier[_pcolormesh_array2d] ( identifier[self] , identifier[array] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[x] = identifier[numpy] . identifier[concatenate] (( identifier[array] . identifier[xindex] . identifier[value] , identifier[array] . identifier[xspan] [- literal[int] :]))
identifier[y] = identifier[numpy] . identifier[concatenate] (( identifier[array] . identifier[yindex] . identifier[value] , identifier[array] . identifier[yspan] [- literal[int] :]))
identifier[xcoord] , identifier[ycoord] = identifier[numpy] . identifier[meshgrid] ( identifier[x] , identifier[y] , identifier[copy] = keyword[False] , identifier[sparse] = keyword[True] )
keyword[return] identifier[self] . identifier[pcolormesh] ( identifier[xcoord] , identifier[ycoord] , identifier[array] . identifier[value] . identifier[T] ,* identifier[args] ,** identifier[kwargs] )
|
def _pcolormesh_array2d(self, array, *args, **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
"""
x = numpy.concatenate((array.xindex.value, array.xspan[-1:]))
y = numpy.concatenate((array.yindex.value, array.yspan[-1:]))
(xcoord, ycoord) = numpy.meshgrid(x, y, copy=False, sparse=True)
return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs)
|
def relMatches(rel_attr, target_rel):
    """Does this target_rel appear in the rel_str?"""
    # XXX: TESTME
    # Each whitespace-separated token is compared case-insensitively on
    # the attribute side; target_rel is assumed to already be lowercase.
    for token in rel_attr.strip().split():
        if token.lower() == target_rel:
            return 1
    return 0
|
def function[relMatches, parameter[rel_attr, target_rel]]:
constant[Does this target_rel appear in the rel_str?]
variable[rels] assign[=] call[call[name[rel_attr].strip, parameter[]].split, parameter[]]
for taget[name[rel]] in starred[name[rels]] begin[:]
variable[rel] assign[=] call[name[rel].lower, parameter[]]
if compare[name[rel] equal[==] name[target_rel]] begin[:]
return[constant[1]]
return[constant[0]]
|
keyword[def] identifier[relMatches] ( identifier[rel_attr] , identifier[target_rel] ):
literal[string]
identifier[rels] = identifier[rel_attr] . identifier[strip] (). identifier[split] ()
keyword[for] identifier[rel] keyword[in] identifier[rels] :
identifier[rel] = identifier[rel] . identifier[lower] ()
keyword[if] identifier[rel] == identifier[target_rel] :
keyword[return] literal[int]
keyword[return] literal[int]
|
def relMatches(rel_attr, target_rel):
"""Does this target_rel appear in the rel_str?"""
# XXX: TESTME
rels = rel_attr.strip().split()
for rel in rels:
rel = rel.lower()
if rel == target_rel:
return 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rel']]
return 0
|
def per_device_batch_size(batch_size, num_gpus):
    """For multi-gpu, batch-size must be a multiple of the number of GPUs.
    Note that this should eventually be handled by DistributionStrategies
    directly. Multi-GPU support is currently experimental, however,
    so doing the work here until that feature is in place.
    Args:
      batch_size: Global batch size to be divided among devices. This should be
        equal to num_gpus times the single-GPU batch_size for multi-gpu training.
      num_gpus: How many GPUs are used with DistributionStrategies.
    Returns:
      Batch size per device.
    Raises:
      ValueError: if batch_size is not divisible by number of devices
    """
    # Single-device (or CPU-only) runs keep the global batch size as-is.
    if num_gpus <= 1:
        return batch_size

    leftover = batch_size % num_gpus
    if leftover:
        # Suggest the nearest smaller batch size that divides evenly.
        message = ('When running with multiple GPUs, batch size '
                   'must be a multiple of the number of available GPUs. Found {} '
                   'GPUs with a batch size of {}; try --batch_size={} instead.'
                   ).format(num_gpus, batch_size, batch_size - leftover)
        raise ValueError(message)
    return int(batch_size / num_gpus)
|
def function[per_device_batch_size, parameter[batch_size, num_gpus]]:
constant[For multi-gpu, batch-size must be a multiple of the number of GPUs.
Note that this should eventually be handled by DistributionStrategies
directly. Multi-GPU support is currently experimental, however,
so doing the work here until that feature is in place.
Args:
batch_size: Global batch size to be divided among devices. This should be
equal to num_gpus times the single-GPU batch_size for multi-gpu training.
num_gpus: How many GPUs are used with DistributionStrategies.
Returns:
Batch size per device.
Raises:
ValueError: if batch_size is not divisible by number of devices
]
if compare[name[num_gpus] less_or_equal[<=] constant[1]] begin[:]
return[name[batch_size]]
variable[remainder] assign[=] binary_operation[name[batch_size] <ast.Mod object at 0x7da2590d6920> name[num_gpus]]
if name[remainder] begin[:]
variable[err] assign[=] call[constant[When running with multiple GPUs, batch size must be a multiple of the number of available GPUs. Found {} GPUs with a batch size of {}; try --batch_size={} instead.].format, parameter[name[num_gpus], name[batch_size], binary_operation[name[batch_size] - name[remainder]]]]
<ast.Raise object at 0x7da18f813700>
return[call[name[int], parameter[binary_operation[name[batch_size] / name[num_gpus]]]]]
|
keyword[def] identifier[per_device_batch_size] ( identifier[batch_size] , identifier[num_gpus] ):
literal[string]
keyword[if] identifier[num_gpus] <= literal[int] :
keyword[return] identifier[batch_size]
identifier[remainder] = identifier[batch_size] % identifier[num_gpus]
keyword[if] identifier[remainder] :
identifier[err] =( literal[string]
literal[string]
literal[string]
). identifier[format] ( identifier[num_gpus] , identifier[batch_size] , identifier[batch_size] - identifier[remainder] )
keyword[raise] identifier[ValueError] ( identifier[err] )
keyword[return] identifier[int] ( identifier[batch_size] / identifier[num_gpus] )
|
def per_device_batch_size(batch_size, num_gpus):
"""For multi-gpu, batch-size must be a multiple of the number of GPUs.
Note that this should eventually be handled by DistributionStrategies
directly. Multi-GPU support is currently experimental, however,
so doing the work here until that feature is in place.
Args:
batch_size: Global batch size to be divided among devices. This should be
equal to num_gpus times the single-GPU batch_size for multi-gpu training.
num_gpus: How many GPUs are used with DistributionStrategies.
Returns:
Batch size per device.
Raises:
ValueError: if batch_size is not divisible by number of devices
"""
if num_gpus <= 1:
return batch_size # depends on [control=['if'], data=[]]
remainder = batch_size % num_gpus
if remainder:
err = 'When running with multiple GPUs, batch size must be a multiple of the number of available GPUs. Found {} GPUs with a batch size of {}; try --batch_size={} instead.'.format(num_gpus, batch_size, batch_size - remainder)
raise ValueError(err) # depends on [control=['if'], data=[]]
return int(batch_size / num_gpus)
|
def copy_meta_data_from_state_m(self, source_state_m):
    """Dismiss current meta data and copy meta data from given state model
    The meta data of the given state model is used as meta data for this state. Also the meta data of all state
    elements (data ports, outcomes, etc.) is overwritten with the meta data of the elements of the given state.
    :param source_state_m: State model to load the meta data from
    """
    # State-level meta data first, then every contained element, matching
    # each element to its counterpart in the source model by id.
    self.meta = deepcopy(source_state_m.meta)
    for port_m in self.input_data_ports:
        src_port_m = source_state_m.get_input_data_port_m(port_m.data_port.data_port_id)
        port_m.meta = deepcopy(src_port_m.meta)
    for port_m in self.output_data_ports:
        src_port_m = source_state_m.get_output_data_port_m(port_m.data_port.data_port_id)
        port_m.meta = deepcopy(src_port_m.meta)
    for outcome_m in self.outcomes:
        src_outcome_m = source_state_m.get_outcome_m(outcome_m.outcome.outcome_id)
        outcome_m.meta = deepcopy(src_outcome_m.meta)
    self.income.meta = deepcopy(source_state_m.income.meta)
    # Notify observers that the whole meta data set was replaced.
    self.meta_signal.emit(MetaSignalMsg("copy_state_m", "all", True))
|
def function[copy_meta_data_from_state_m, parameter[self, source_state_m]]:
constant[Dismiss current meta data and copy meta data from given state model
The meta data of the given state model is used as meta data for this state. Also the meta data of all state
elements (data ports, outcomes, etc.) is overwritten with the meta data of the elements of the given state.
:param source_state_m: State model to load the meta data from
]
name[self].meta assign[=] call[name[deepcopy], parameter[name[source_state_m].meta]]
for taget[name[input_data_port_m]] in starred[name[self].input_data_ports] begin[:]
variable[source_data_port_m] assign[=] call[name[source_state_m].get_input_data_port_m, parameter[name[input_data_port_m].data_port.data_port_id]]
name[input_data_port_m].meta assign[=] call[name[deepcopy], parameter[name[source_data_port_m].meta]]
for taget[name[output_data_port_m]] in starred[name[self].output_data_ports] begin[:]
variable[source_data_port_m] assign[=] call[name[source_state_m].get_output_data_port_m, parameter[name[output_data_port_m].data_port.data_port_id]]
name[output_data_port_m].meta assign[=] call[name[deepcopy], parameter[name[source_data_port_m].meta]]
for taget[name[outcome_m]] in starred[name[self].outcomes] begin[:]
variable[source_outcome_m] assign[=] call[name[source_state_m].get_outcome_m, parameter[name[outcome_m].outcome.outcome_id]]
name[outcome_m].meta assign[=] call[name[deepcopy], parameter[name[source_outcome_m].meta]]
name[self].income.meta assign[=] call[name[deepcopy], parameter[name[source_state_m].income.meta]]
call[name[self].meta_signal.emit, parameter[call[name[MetaSignalMsg], parameter[constant[copy_state_m], constant[all], constant[True]]]]]
|
keyword[def] identifier[copy_meta_data_from_state_m] ( identifier[self] , identifier[source_state_m] ):
literal[string]
identifier[self] . identifier[meta] = identifier[deepcopy] ( identifier[source_state_m] . identifier[meta] )
keyword[for] identifier[input_data_port_m] keyword[in] identifier[self] . identifier[input_data_ports] :
identifier[source_data_port_m] = identifier[source_state_m] . identifier[get_input_data_port_m] ( identifier[input_data_port_m] . identifier[data_port] . identifier[data_port_id] )
identifier[input_data_port_m] . identifier[meta] = identifier[deepcopy] ( identifier[source_data_port_m] . identifier[meta] )
keyword[for] identifier[output_data_port_m] keyword[in] identifier[self] . identifier[output_data_ports] :
identifier[source_data_port_m] = identifier[source_state_m] . identifier[get_output_data_port_m] ( identifier[output_data_port_m] . identifier[data_port] . identifier[data_port_id] )
identifier[output_data_port_m] . identifier[meta] = identifier[deepcopy] ( identifier[source_data_port_m] . identifier[meta] )
keyword[for] identifier[outcome_m] keyword[in] identifier[self] . identifier[outcomes] :
identifier[source_outcome_m] = identifier[source_state_m] . identifier[get_outcome_m] ( identifier[outcome_m] . identifier[outcome] . identifier[outcome_id] )
identifier[outcome_m] . identifier[meta] = identifier[deepcopy] ( identifier[source_outcome_m] . identifier[meta] )
identifier[self] . identifier[income] . identifier[meta] = identifier[deepcopy] ( identifier[source_state_m] . identifier[income] . identifier[meta] )
identifier[self] . identifier[meta_signal] . identifier[emit] ( identifier[MetaSignalMsg] ( literal[string] , literal[string] , keyword[True] ))
|
def copy_meta_data_from_state_m(self, source_state_m):
"""Dismiss current meta data and copy meta data from given state model
The meta data of the given state model is used as meta data for this state. Also the meta data of all state
elements (data ports, outcomes, etc.) is overwritten with the meta data of the elements of the given state.
:param source_state_m: State model to load the meta data from
"""
self.meta = deepcopy(source_state_m.meta)
for input_data_port_m in self.input_data_ports:
source_data_port_m = source_state_m.get_input_data_port_m(input_data_port_m.data_port.data_port_id)
input_data_port_m.meta = deepcopy(source_data_port_m.meta) # depends on [control=['for'], data=['input_data_port_m']]
for output_data_port_m in self.output_data_ports:
source_data_port_m = source_state_m.get_output_data_port_m(output_data_port_m.data_port.data_port_id)
output_data_port_m.meta = deepcopy(source_data_port_m.meta) # depends on [control=['for'], data=['output_data_port_m']]
for outcome_m in self.outcomes:
source_outcome_m = source_state_m.get_outcome_m(outcome_m.outcome.outcome_id)
outcome_m.meta = deepcopy(source_outcome_m.meta) # depends on [control=['for'], data=['outcome_m']]
self.income.meta = deepcopy(source_state_m.income.meta)
self.meta_signal.emit(MetaSignalMsg('copy_state_m', 'all', True))
|
def metric_coherence_mimno_2011(topic_word_distrib, dtm, top_n=20, eps=1e-12, normalize=True, return_mean=False):
    """
    Calculate coherence metric according to Mimno et al. 2011 (a.k.a. "U_Mass" coherence metric). There are two
    modifications to the originally suggested measure:
    - uses a different epsilon by default (set `eps=1` for original)
    - uses a normalizing constant by default (set `normalize=False` for original)
    Provide a topic word distribution $\phi$ as `topic_word_distrib` and a document-term-matrix `dtm` (can be sparse).
    `top_n` controls how many most probable words per topic are selected.
    By default, it will return a NumPy array of coherence values per topic (same ordering as in `topic_word_distrib`).
    Set `return_mean` to True to return the mean of all topics instead.
    D. Mimno, H. Wallach, E. Talley, M. Leenders, A. McCullum 2011: Optimizing semantic coherence in topic models
    """
    n_topics, n_vocab = topic_word_distrib.shape
    if n_vocab != dtm.shape[1]:
        raise ValueError('shapes of provided `topic_word_distrib` and `dtm` do not match (vocab sizes differ)')
    if top_n > n_vocab:
        raise ValueError('`top_n=%d` is larger than the vocabulary size of %d words'
                         % (top_n, topic_word_distrib.shape[1]))

    top_words = top_words_for_topics(topic_word_distrib, top_n)   # V

    # column slicing below is only efficient on CSC-format sparse matrices
    if issparse(dtm) and dtm.format != 'csc':
        dtm = dtm.tocsc()

    coherence_per_topic = []
    for topic_idx in range(n_topics):
        sub_dtm = dtm[:, top_words[topic_idx]]
        df = get_doc_frequencies(sub_dtm)      # D(v)
        codf = get_codoc_frequencies(sub_dtm)  # D(v, v')

        # sum log (D(v_m, v_l) + eps) / D(v_l) over all word pairs (m > l)
        score = 0
        for m in range(1, top_n):
            for l in range(m):
                joint = codf.get((m, l), codf.get((l, m)))
                score += np.log((joint + eps) / df[l])
        coherence_per_topic.append(score)

    coh = np.array(coherence_per_topic)

    if normalize:
        # divide by the number of word pairs: top_n choose 2
        coh *= 2 / (top_n * (top_n - 1))

    return coh.mean() if return_mean else coh
|
def function[metric_coherence_mimno_2011, parameter[topic_word_distrib, dtm, top_n, eps, normalize, return_mean]]:
constant[
Calculate coherence metric according to Mimno et al. 2011 (a.k.a. "U_Mass" coherence metric). There are two
modifications to the originally suggested measure:
- uses a different epsilon by default (set `eps=1` for original)
- uses a normalizing constant by default (set `normalize=False` for original)
Provide a topic word distribution $\phi$ as `topic_word_distrib` and a document-term-matrix `dtm` (can be sparse).
`top_n` controls how many most probable words per topic are selected.
By default, it will return a NumPy array of coherence values per topic (same ordering as in `topic_word_distrib`).
Set `return_mean` to True to return the mean of all topics instead.
D. Mimno, H. Wallach, E. Talley, M. Leenders, A. McCullum 2011: Optimizing semantic coherence in topic models
]
<ast.Tuple object at 0x7da18dc04e20> assign[=] name[topic_word_distrib].shape
if compare[name[n_vocab] not_equal[!=] call[name[dtm].shape][constant[1]]] begin[:]
<ast.Raise object at 0x7da18dc07f70>
if compare[name[top_n] greater[>] name[n_vocab]] begin[:]
<ast.Raise object at 0x7da18dc07e80>
variable[top_words] assign[=] call[name[top_words_for_topics], parameter[name[topic_word_distrib], name[top_n]]]
if <ast.BoolOp object at 0x7da18dc05120> begin[:]
variable[dtm] assign[=] call[name[dtm].tocsc, parameter[]]
variable[coh] assign[=] list[[]]
for taget[name[t]] in starred[call[name[range], parameter[name[n_topics]]]] begin[:]
variable[c_t] assign[=] constant[0]
variable[v] assign[=] call[name[top_words]][name[t]]
variable[top_dtm] assign[=] call[name[dtm]][tuple[[<ast.Slice object at 0x7da18dc061a0>, <ast.Name object at 0x7da18dc054b0>]]]
variable[df] assign[=] call[name[get_doc_frequencies], parameter[name[top_dtm]]]
variable[codf] assign[=] call[name[get_codoc_frequencies], parameter[name[top_dtm]]]
for taget[name[m]] in starred[call[name[range], parameter[constant[1], name[top_n]]]] begin[:]
for taget[name[l]] in starred[call[name[range], parameter[name[m]]]] begin[:]
<ast.AugAssign object at 0x7da18dc07eb0>
call[name[coh].append, parameter[name[c_t]]]
variable[coh] assign[=] call[name[np].array, parameter[name[coh]]]
if name[normalize] begin[:]
<ast.AugAssign object at 0x7da18dc07310>
if name[return_mean] begin[:]
return[call[name[coh].mean, parameter[]]]
|
keyword[def] identifier[metric_coherence_mimno_2011] ( identifier[topic_word_distrib] , identifier[dtm] , identifier[top_n] = literal[int] , identifier[eps] = literal[int] , identifier[normalize] = keyword[True] , identifier[return_mean] = keyword[False] ):
literal[string]
identifier[n_topics] , identifier[n_vocab] = identifier[topic_word_distrib] . identifier[shape]
keyword[if] identifier[n_vocab] != identifier[dtm] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[top_n] > identifier[n_vocab] :
keyword[raise] identifier[ValueError] ( literal[string]
%( identifier[top_n] , identifier[topic_word_distrib] . identifier[shape] [ literal[int] ]))
identifier[top_words] = identifier[top_words_for_topics] ( identifier[topic_word_distrib] , identifier[top_n] )
keyword[if] identifier[issparse] ( identifier[dtm] ) keyword[and] identifier[dtm] . identifier[format] != literal[string] :
identifier[dtm] = identifier[dtm] . identifier[tocsc] ()
identifier[coh] =[]
keyword[for] identifier[t] keyword[in] identifier[range] ( identifier[n_topics] ):
identifier[c_t] = literal[int]
identifier[v] = identifier[top_words] [ identifier[t] ]
identifier[top_dtm] = identifier[dtm] [:, identifier[v] ]
identifier[df] = identifier[get_doc_frequencies] ( identifier[top_dtm] )
identifier[codf] = identifier[get_codoc_frequencies] ( identifier[top_dtm] )
keyword[for] identifier[m] keyword[in] identifier[range] ( literal[int] , identifier[top_n] ):
keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[m] ):
identifier[c_t] += identifier[np] . identifier[log] (( identifier[codf] . identifier[get] (( identifier[m] , identifier[l] ), identifier[codf] . identifier[get] (( identifier[l] , identifier[m] )))+ identifier[eps] )/
identifier[df] [ identifier[l] ])
identifier[coh] . identifier[append] ( identifier[c_t] )
identifier[coh] = identifier[np] . identifier[array] ( identifier[coh] )
keyword[if] identifier[normalize] :
identifier[coh] *= literal[int] /( identifier[top_n] *( identifier[top_n] - literal[int] ))
keyword[if] identifier[return_mean] :
keyword[return] identifier[coh] . identifier[mean] ()
keyword[else] :
keyword[return] identifier[coh]
|
def metric_coherence_mimno_2011(topic_word_distrib, dtm, top_n=20, eps=1e-12, normalize=True, return_mean=False):
"""
Calculate coherence metric according to Mimno et al. 2011 (a.k.a. "U_Mass" coherence metric). There are two
modifications to the originally suggested measure:
- uses a different epsilon by default (set `eps=1` for original)
- uses a normalizing constant by default (set `normalize=False` for original)
Provide a topic word distribution $\\phi$ as `topic_word_distrib` and a document-term-matrix `dtm` (can be sparse).
`top_n` controls how many most probable words per topic are selected.
By default, it will return a NumPy array of coherence values per topic (same ordering as in `topic_word_distrib`).
Set `return_mean` to True to return the mean of all topics instead.
D. Mimno, H. Wallach, E. Talley, M. Leenders, A. McCullum 2011: Optimizing semantic coherence in topic models
"""
(n_topics, n_vocab) = topic_word_distrib.shape
if n_vocab != dtm.shape[1]:
raise ValueError('shapes of provided `topic_word_distrib` and `dtm` do not match (vocab sizes differ)') # depends on [control=['if'], data=[]]
if top_n > n_vocab:
raise ValueError('`top_n=%d` is larger than the vocabulary size of %d words' % (top_n, topic_word_distrib.shape[1])) # depends on [control=['if'], data=['top_n']]
top_words = top_words_for_topics(topic_word_distrib, top_n) # V
if issparse(dtm) and dtm.format != 'csc':
dtm = dtm.tocsc() # depends on [control=['if'], data=[]]
coh = []
for t in range(n_topics):
c_t = 0
v = top_words[t]
top_dtm = dtm[:, v]
df = get_doc_frequencies(top_dtm) # D(v)
codf = get_codoc_frequencies(top_dtm) # D(v, v')
for m in range(1, top_n):
for l in range(m):
c_t += np.log((codf.get((m, l), codf.get((l, m))) + eps) / df[l]) # depends on [control=['for'], data=['l']] # depends on [control=['for'], data=['m']]
coh.append(c_t) # depends on [control=['for'], data=['t']]
coh = np.array(coh)
if normalize:
coh *= 2 / (top_n * (top_n - 1)) # depends on [control=['if'], data=[]]
if return_mean:
return coh.mean() # depends on [control=['if'], data=[]]
else:
return coh
|
def _get_available_ports():
    """ Tries to find the available serial ports on your system. """
    system = platform.system()
    if system == 'Darwin':
        return glob.glob('/dev/tty.usb*')

    elif system == 'Linux':
        return (glob.glob('/dev/ttyACM*') +
                glob.glob('/dev/ttyUSB*') +
                glob.glob('/dev/ttyAMA*'))

    elif sys.platform.lower() == 'cygwin':
        return glob.glob('/dev/com*')

    elif system == 'Windows':
        # Enumerate COM ports from the registry (Python 2 _winreg module).
        import _winreg
        import itertools

        found = []
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                              'HARDWARE\\DEVICEMAP\\SERIALCOMM')
        # EnumValue raises WindowsError once all values are exhausted,
        # which is how this loop terminates.
        for idx in itertools.count():
            try:
                found.append(str(_winreg.EnumValue(key, idx)[1]))
            except WindowsError:
                return found

    else:
        raise EnvironmentError('{} is an unsupported platform, cannot find serial ports !'.format(platform.system()))
    return []
|
def function[_get_available_ports, parameter[]]:
constant[ Tries to find the available serial ports on your system. ]
if compare[call[name[platform].system, parameter[]] equal[==] constant[Darwin]] begin[:]
return[call[name[glob].glob, parameter[constant[/dev/tty.usb*]]]]
return[list[[]]]
|
keyword[def] identifier[_get_available_ports] ():
literal[string]
keyword[if] identifier[platform] . identifier[system] ()== literal[string] :
keyword[return] identifier[glob] . identifier[glob] ( literal[string] )
keyword[elif] identifier[platform] . identifier[system] ()== literal[string] :
keyword[return] identifier[glob] . identifier[glob] ( literal[string] )+ identifier[glob] . identifier[glob] ( literal[string] )+ identifier[glob] . identifier[glob] ( literal[string] )
keyword[elif] identifier[sys] . identifier[platform] . identifier[lower] ()== literal[string] :
keyword[return] identifier[glob] . identifier[glob] ( literal[string] )
keyword[elif] identifier[platform] . identifier[system] ()== literal[string] :
keyword[import] identifier[_winreg]
keyword[import] identifier[itertools]
identifier[ports] =[]
identifier[path] = literal[string]
identifier[key] = identifier[_winreg] . identifier[OpenKey] ( identifier[_winreg] . identifier[HKEY_LOCAL_MACHINE] , identifier[path] )
keyword[for] identifier[i] keyword[in] identifier[itertools] . identifier[count] ():
keyword[try] :
identifier[ports] . identifier[append] ( identifier[str] ( identifier[_winreg] . identifier[EnumValue] ( identifier[key] , identifier[i] )[ literal[int] ]))
keyword[except] identifier[WindowsError] :
keyword[return] identifier[ports]
keyword[else] :
keyword[raise] identifier[EnvironmentError] ( literal[string] . identifier[format] ( identifier[platform] . identifier[system] ()))
keyword[return] []
|
def _get_available_ports():
""" Tries to find the available serial ports on your system. """
if platform.system() == 'Darwin':
return glob.glob('/dev/tty.usb*') # depends on [control=['if'], data=[]]
elif platform.system() == 'Linux':
return glob.glob('/dev/ttyACM*') + glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyAMA*') # depends on [control=['if'], data=[]]
elif sys.platform.lower() == 'cygwin':
return glob.glob('/dev/com*') # depends on [control=['if'], data=[]]
elif platform.system() == 'Windows':
import _winreg
import itertools
ports = []
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, path)
for i in itertools.count():
try:
ports.append(str(_winreg.EnumValue(key, i)[1])) # depends on [control=['try'], data=[]]
except WindowsError:
return ports # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
raise EnvironmentError('{} is an unsupported platform, cannot find serial ports !'.format(platform.system()))
return []
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.