code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def post(self, request):
"""Handle a query observer unsubscription request."""
try:
observer_id = request.query_params['observer']
session_id = request.query_params['subscriber']
except KeyError:
return response.Response(status=400)
observer.remove_subscriber(session_id, observer_id)
return response.Response() | def function[post, parameter[self, request]]:
constant[Handle a query observer unsubscription request.]
<ast.Try object at 0x7da18dc04fd0>
call[name[observer].remove_subscriber, parameter[name[session_id], name[observer_id]]]
return[call[name[response].Response, parameter[]]] | keyword[def] identifier[post] ( identifier[self] , identifier[request] ):
literal[string]
keyword[try] :
identifier[observer_id] = identifier[request] . identifier[query_params] [ literal[string] ]
identifier[session_id] = identifier[request] . identifier[query_params] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[return] identifier[response] . identifier[Response] ( identifier[status] = literal[int] )
identifier[observer] . identifier[remove_subscriber] ( identifier[session_id] , identifier[observer_id] )
keyword[return] identifier[response] . identifier[Response] () | def post(self, request):
"""Handle a query observer unsubscription request."""
try:
observer_id = request.query_params['observer']
session_id = request.query_params['subscriber'] # depends on [control=['try'], data=[]]
except KeyError:
return response.Response(status=400) # depends on [control=['except'], data=[]]
observer.remove_subscriber(session_id, observer_id)
return response.Response() |
def dict_output(self, check_name, groups, source_name, limit):
'''
Builds the results into a JSON structure and writes it to the file buffer.
@param check_name The test which was run
@param groups List of results from compliance checker
@param output_filename Path to file to save output
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
'''
aggregates = self.build_structure(check_name, groups, source_name, limit)
return self.serialize(aggregates) | def function[dict_output, parameter[self, check_name, groups, source_name, limit]]:
constant[
Builds the results into a JSON structure and writes it to the file buffer.
@param check_name The test which was run
@param groups List of results from compliance checker
@param output_filename Path to file to save output
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
]
variable[aggregates] assign[=] call[name[self].build_structure, parameter[name[check_name], name[groups], name[source_name], name[limit]]]
return[call[name[self].serialize, parameter[name[aggregates]]]] | keyword[def] identifier[dict_output] ( identifier[self] , identifier[check_name] , identifier[groups] , identifier[source_name] , identifier[limit] ):
literal[string]
identifier[aggregates] = identifier[self] . identifier[build_structure] ( identifier[check_name] , identifier[groups] , identifier[source_name] , identifier[limit] )
keyword[return] identifier[self] . identifier[serialize] ( identifier[aggregates] ) | def dict_output(self, check_name, groups, source_name, limit):
"""
Builds the results into a JSON structure and writes it to the file buffer.
@param check_name The test which was run
@param groups List of results from compliance checker
@param output_filename Path to file to save output
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
"""
aggregates = self.build_structure(check_name, groups, source_name, limit)
return self.serialize(aggregates) |
def DTAgentProgram(belief_state):
"A decision-theoretic agent. [Fig. 13.1]"
def program(percept):
belief_state.observe(program.action, percept)
program.action = argmax(belief_state.actions(),
belief_state.expected_outcome_utility)
return program.action
program.action = None
return program | def function[DTAgentProgram, parameter[belief_state]]:
constant[A decision-theoretic agent. [Fig. 13.1]]
def function[program, parameter[percept]]:
call[name[belief_state].observe, parameter[name[program].action, name[percept]]]
name[program].action assign[=] call[name[argmax], parameter[call[name[belief_state].actions, parameter[]], name[belief_state].expected_outcome_utility]]
return[name[program].action]
name[program].action assign[=] constant[None]
return[name[program]] | keyword[def] identifier[DTAgentProgram] ( identifier[belief_state] ):
literal[string]
keyword[def] identifier[program] ( identifier[percept] ):
identifier[belief_state] . identifier[observe] ( identifier[program] . identifier[action] , identifier[percept] )
identifier[program] . identifier[action] = identifier[argmax] ( identifier[belief_state] . identifier[actions] (),
identifier[belief_state] . identifier[expected_outcome_utility] )
keyword[return] identifier[program] . identifier[action]
identifier[program] . identifier[action] = keyword[None]
keyword[return] identifier[program] | def DTAgentProgram(belief_state):
"""A decision-theoretic agent. [Fig. 13.1]"""
def program(percept):
belief_state.observe(program.action, percept)
program.action = argmax(belief_state.actions(), belief_state.expected_outcome_utility)
return program.action
program.action = None
return program |
def draw_uppercase_key(self, surface, key):
"""Default drawing method for uppercase key. Drawn as character key.
:param surface: Surface background should be drawn in.
:param key: Target key to be drawn.
"""
key.value = u'\u21e7'
if key.is_activated():
key.value = u'\u21ea'
self.draw_character_key(surface, key, True) | def function[draw_uppercase_key, parameter[self, surface, key]]:
constant[Default drawing method for uppercase key. Drawn as character key.
:param surface: Surface background should be drawn in.
:param key: Target key to be drawn.
]
name[key].value assign[=] constant[⇧]
if call[name[key].is_activated, parameter[]] begin[:]
name[key].value assign[=] constant[⇪]
call[name[self].draw_character_key, parameter[name[surface], name[key], constant[True]]] | keyword[def] identifier[draw_uppercase_key] ( identifier[self] , identifier[surface] , identifier[key] ):
literal[string]
identifier[key] . identifier[value] = literal[string]
keyword[if] identifier[key] . identifier[is_activated] ():
identifier[key] . identifier[value] = literal[string]
identifier[self] . identifier[draw_character_key] ( identifier[surface] , identifier[key] , keyword[True] ) | def draw_uppercase_key(self, surface, key):
"""Default drawing method for uppercase key. Drawn as character key.
:param surface: Surface background should be drawn in.
:param key: Target key to be drawn.
"""
key.value = u'⇧'
if key.is_activated():
key.value = u'⇪' # depends on [control=['if'], data=[]]
self.draw_character_key(surface, key, True) |
def _get_same_codes(self):
"""get SAME codes, load into a dict and cache"""
same = {}
url = '''http://www.nws.noaa.gov/nwr/data/SameCode.txt'''
# pylint: disable=E1103
raw = requests.get(url).content.decode('utf-8') # py3 compatibility
for row in raw.split('\n'):
try:
code, local, state = str(row).strip().split(',')
location = {'code': code, 'local': local, 'state': state.strip()}
# when I contacted the nws to add a missing same code
# they added a space before the state in the samecodes file
# stripping it out
same[code] = location
finally:
pass
cache = open(self._same_cache_file, 'wb')
cPickle.dump(same, cache)
cache.close()
return same | def function[_get_same_codes, parameter[self]]:
constant[get SAME codes, load into a dict and cache]
variable[same] assign[=] dictionary[[], []]
variable[url] assign[=] constant[http://www.nws.noaa.gov/nwr/data/SameCode.txt]
variable[raw] assign[=] call[call[name[requests].get, parameter[name[url]]].content.decode, parameter[constant[utf-8]]]
for taget[name[row]] in starred[call[name[raw].split, parameter[constant[
]]]] begin[:]
<ast.Try object at 0x7da18c4ce3e0>
variable[cache] assign[=] call[name[open], parameter[name[self]._same_cache_file, constant[wb]]]
call[name[cPickle].dump, parameter[name[same], name[cache]]]
call[name[cache].close, parameter[]]
return[name[same]] | keyword[def] identifier[_get_same_codes] ( identifier[self] ):
literal[string]
identifier[same] ={}
identifier[url] = literal[string]
identifier[raw] = identifier[requests] . identifier[get] ( identifier[url] ). identifier[content] . identifier[decode] ( literal[string] )
keyword[for] identifier[row] keyword[in] identifier[raw] . identifier[split] ( literal[string] ):
keyword[try] :
identifier[code] , identifier[local] , identifier[state] = identifier[str] ( identifier[row] ). identifier[strip] (). identifier[split] ( literal[string] )
identifier[location] ={ literal[string] : identifier[code] , literal[string] : identifier[local] , literal[string] : identifier[state] . identifier[strip] ()}
identifier[same] [ identifier[code] ]= identifier[location]
keyword[finally] :
keyword[pass]
identifier[cache] = identifier[open] ( identifier[self] . identifier[_same_cache_file] , literal[string] )
identifier[cPickle] . identifier[dump] ( identifier[same] , identifier[cache] )
identifier[cache] . identifier[close] ()
keyword[return] identifier[same] | def _get_same_codes(self):
"""get SAME codes, load into a dict and cache"""
same = {}
url = 'http://www.nws.noaa.gov/nwr/data/SameCode.txt'
# pylint: disable=E1103
raw = requests.get(url).content.decode('utf-8') # py3 compatibility
for row in raw.split('\n'):
try:
(code, local, state) = str(row).strip().split(',')
location = {'code': code, 'local': local, 'state': state.strip()}
# when I contacted the nws to add a missing same code
# they added a space before the state in the samecodes file
# stripping it out
same[code] = location # depends on [control=['try'], data=[]]
finally:
pass # depends on [control=['for'], data=['row']]
cache = open(self._same_cache_file, 'wb')
cPickle.dump(same, cache)
cache.close()
return same |
def out_putfile(self, fpath):
""" 輸出成 CSV 檔
:param path fpath: 檔案輸出位置
.. todo:: files output using `with` syntax.
"""
with open(fpath, 'w') as csv_file:
output = csv.writer(csv_file)
output.writerows(self.__raw_data) | def function[out_putfile, parameter[self, fpath]]:
constant[ 輸出成 CSV 檔
:param path fpath: 檔案輸出位置
.. todo:: files output using `with` syntax.
]
with call[name[open], parameter[name[fpath], constant[w]]] begin[:]
variable[output] assign[=] call[name[csv].writer, parameter[name[csv_file]]]
call[name[output].writerows, parameter[name[self].__raw_data]] | keyword[def] identifier[out_putfile] ( identifier[self] , identifier[fpath] ):
literal[string]
keyword[with] identifier[open] ( identifier[fpath] , literal[string] ) keyword[as] identifier[csv_file] :
identifier[output] = identifier[csv] . identifier[writer] ( identifier[csv_file] )
identifier[output] . identifier[writerows] ( identifier[self] . identifier[__raw_data] ) | def out_putfile(self, fpath):
""" 輸出成 CSV 檔
:param path fpath: 檔案輸出位置
.. todo:: files output using `with` syntax.
"""
with open(fpath, 'w') as csv_file:
output = csv.writer(csv_file)
output.writerows(self.__raw_data) # depends on [control=['with'], data=['csv_file']] |
def _delete_forever_values(self, forever_key):
"""
Delete all of the keys that have been stored forever.
:type forever_key: str
"""
forever = self._store.connection().lrange(forever_key, 0, -1)
if len(forever) > 0:
self._store.connection().delete(*forever) | def function[_delete_forever_values, parameter[self, forever_key]]:
constant[
Delete all of the keys that have been stored forever.
:type forever_key: str
]
variable[forever] assign[=] call[call[name[self]._store.connection, parameter[]].lrange, parameter[name[forever_key], constant[0], <ast.UnaryOp object at 0x7da1b19e76a0>]]
if compare[call[name[len], parameter[name[forever]]] greater[>] constant[0]] begin[:]
call[call[name[self]._store.connection, parameter[]].delete, parameter[<ast.Starred object at 0x7da1b1a3f5b0>]] | keyword[def] identifier[_delete_forever_values] ( identifier[self] , identifier[forever_key] ):
literal[string]
identifier[forever] = identifier[self] . identifier[_store] . identifier[connection] (). identifier[lrange] ( identifier[forever_key] , literal[int] ,- literal[int] )
keyword[if] identifier[len] ( identifier[forever] )> literal[int] :
identifier[self] . identifier[_store] . identifier[connection] (). identifier[delete] (* identifier[forever] ) | def _delete_forever_values(self, forever_key):
"""
Delete all of the keys that have been stored forever.
:type forever_key: str
"""
forever = self._store.connection().lrange(forever_key, 0, -1)
if len(forever) > 0:
self._store.connection().delete(*forever) # depends on [control=['if'], data=[]] |
def get_raw_data_from_url(pdb_id, reduced=False):
"""" Get the msgpack unpacked data given a PDB id.
:param pdb_id: the input PDB id
:return the unpacked data (a dict) """
url = get_url(pdb_id,reduced)
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip_data(response.read())
else:
data = response.read()
return _unpack(data) | def function[get_raw_data_from_url, parameter[pdb_id, reduced]]:
constant[" Get the msgpack unpacked data given a PDB id.
:param pdb_id: the input PDB id
:return the unpacked data (a dict) ]
variable[url] assign[=] call[name[get_url], parameter[name[pdb_id], name[reduced]]]
variable[request] assign[=] call[name[urllib2].Request, parameter[name[url]]]
call[name[request].add_header, parameter[constant[Accept-encoding], constant[gzip]]]
variable[response] assign[=] call[name[urllib2].urlopen, parameter[name[request]]]
if compare[call[call[name[response].info, parameter[]].get, parameter[constant[Content-Encoding]]] equal[==] constant[gzip]] begin[:]
variable[data] assign[=] call[name[ungzip_data], parameter[call[name[response].read, parameter[]]]]
return[call[name[_unpack], parameter[name[data]]]] | keyword[def] identifier[get_raw_data_from_url] ( identifier[pdb_id] , identifier[reduced] = keyword[False] ):
literal[string]
identifier[url] = identifier[get_url] ( identifier[pdb_id] , identifier[reduced] )
identifier[request] = identifier[urllib2] . identifier[Request] ( identifier[url] )
identifier[request] . identifier[add_header] ( literal[string] , literal[string] )
identifier[response] = identifier[urllib2] . identifier[urlopen] ( identifier[request] )
keyword[if] identifier[response] . identifier[info] (). identifier[get] ( literal[string] )== literal[string] :
identifier[data] = identifier[ungzip_data] ( identifier[response] . identifier[read] ())
keyword[else] :
identifier[data] = identifier[response] . identifier[read] ()
keyword[return] identifier[_unpack] ( identifier[data] ) | def get_raw_data_from_url(pdb_id, reduced=False):
"""" Get the msgpack unpacked data given a PDB id.
:param pdb_id: the input PDB id
:return the unpacked data (a dict) """
url = get_url(pdb_id, reduced)
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip_data(response.read()) # depends on [control=['if'], data=[]]
else:
data = response.read()
return _unpack(data) |
def pyeapi_config(commands=None,
config_file=None,
template_engine='jinja',
context=None,
defaults=None,
saltenv='base',
**kwargs):
'''
.. versionadded:: 2019.2.0
Configures the Arista switch with the specified commands, via the ``pyeapi``
library. This function forwards the existing connection details to the
:mod:`pyeapi.run_commands <salt.module.arista_pyeapi.run_commands>`
execution function.
commands
The list of configuration commands to load on the Arista switch.
.. note::
This argument is ignored when ``config_file`` is specified.
config_file
The source file with the configuration commands to be sent to the device.
The file can also be a template that can be rendered using the template
engine of choice. This can be specified using the absolute path to the
file, or using one of the following URL schemes:
- ``salt://``
- ``https://``
- ``ftp:/``
- ``s3:/``
- ``swift://``
template_engine: ``jinja``
The template engine to use when rendering the source file. Default:
``jinja``. To simply fetch the file without attempting to render, set
this argument to ``None``.
context: ``None``
Variables to add to the template context.
defaults: ``None``
Default values of the ``context`` dict.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. Ignored if
``config_file`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' napalm.pyeapi_config 'ntp server 1.2.3.4'
'''
pyeapi_kwargs = pyeapi_nxos_api_args(**kwargs)
return __salt__['pyeapi.config'](commands=commands,
config_file=config_file,
template_engine=template_engine,
context=context,
defaults=defaults,
saltenv=saltenv,
**pyeapi_kwargs) | def function[pyeapi_config, parameter[commands, config_file, template_engine, context, defaults, saltenv]]:
constant[
.. versionadded:: 2019.2.0
Configures the Arista switch with the specified commands, via the ``pyeapi``
library. This function forwards the existing connection details to the
:mod:`pyeapi.run_commands <salt.module.arista_pyeapi.run_commands>`
execution function.
commands
The list of configuration commands to load on the Arista switch.
.. note::
This argument is ignored when ``config_file`` is specified.
config_file
The source file with the configuration commands to be sent to the device.
The file can also be a template that can be rendered using the template
engine of choice. This can be specified using the absolute path to the
file, or using one of the following URL schemes:
- ``salt://``
- ``https://``
- ``ftp:/``
- ``s3:/``
- ``swift://``
template_engine: ``jinja``
The template engine to use when rendering the source file. Default:
``jinja``. To simply fetch the file without attempting to render, set
this argument to ``None``.
context: ``None``
Variables to add to the template context.
defaults: ``None``
Default values of the ``context`` dict.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. Ignored if
``config_file`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' napalm.pyeapi_config 'ntp server 1.2.3.4'
]
variable[pyeapi_kwargs] assign[=] call[name[pyeapi_nxos_api_args], parameter[]]
return[call[call[name[__salt__]][constant[pyeapi.config]], parameter[]]] | keyword[def] identifier[pyeapi_config] ( identifier[commands] = keyword[None] ,
identifier[config_file] = keyword[None] ,
identifier[template_engine] = literal[string] ,
identifier[context] = keyword[None] ,
identifier[defaults] = keyword[None] ,
identifier[saltenv] = literal[string] ,
** identifier[kwargs] ):
literal[string]
identifier[pyeapi_kwargs] = identifier[pyeapi_nxos_api_args] (** identifier[kwargs] )
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[commands] = identifier[commands] ,
identifier[config_file] = identifier[config_file] ,
identifier[template_engine] = identifier[template_engine] ,
identifier[context] = identifier[context] ,
identifier[defaults] = identifier[defaults] ,
identifier[saltenv] = identifier[saltenv] ,
** identifier[pyeapi_kwargs] ) | def pyeapi_config(commands=None, config_file=None, template_engine='jinja', context=None, defaults=None, saltenv='base', **kwargs):
"""
.. versionadded:: 2019.2.0
Configures the Arista switch with the specified commands, via the ``pyeapi``
library. This function forwards the existing connection details to the
:mod:`pyeapi.run_commands <salt.module.arista_pyeapi.run_commands>`
execution function.
commands
The list of configuration commands to load on the Arista switch.
.. note::
This argument is ignored when ``config_file`` is specified.
config_file
The source file with the configuration commands to be sent to the device.
The file can also be a template that can be rendered using the template
engine of choice. This can be specified using the absolute path to the
file, or using one of the following URL schemes:
- ``salt://``
- ``https://``
- ``ftp:/``
- ``s3:/``
- ``swift://``
template_engine: ``jinja``
The template engine to use when rendering the source file. Default:
``jinja``. To simply fetch the file without attempting to render, set
this argument to ``None``.
context: ``None``
Variables to add to the template context.
defaults: ``None``
Default values of the ``context`` dict.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. Ignored if
``config_file`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' napalm.pyeapi_config 'ntp server 1.2.3.4'
"""
pyeapi_kwargs = pyeapi_nxos_api_args(**kwargs)
return __salt__['pyeapi.config'](commands=commands, config_file=config_file, template_engine=template_engine, context=context, defaults=defaults, saltenv=saltenv, **pyeapi_kwargs) |
def peak_generation_per_technology(self):
"""
Peak generation of each technology in the grid
Returns
-------
:pandas:`pandas.Series<series>`
Peak generation index by technology
"""
peak_generation = defaultdict(float)
for gen in self.generators:
peak_generation[gen.type] += gen.nominal_capacity
return pd.Series(peak_generation) | def function[peak_generation_per_technology, parameter[self]]:
constant[
Peak generation of each technology in the grid
Returns
-------
:pandas:`pandas.Series<series>`
Peak generation index by technology
]
variable[peak_generation] assign[=] call[name[defaultdict], parameter[name[float]]]
for taget[name[gen]] in starred[name[self].generators] begin[:]
<ast.AugAssign object at 0x7da1b032a5f0>
return[call[name[pd].Series, parameter[name[peak_generation]]]] | keyword[def] identifier[peak_generation_per_technology] ( identifier[self] ):
literal[string]
identifier[peak_generation] = identifier[defaultdict] ( identifier[float] )
keyword[for] identifier[gen] keyword[in] identifier[self] . identifier[generators] :
identifier[peak_generation] [ identifier[gen] . identifier[type] ]+= identifier[gen] . identifier[nominal_capacity]
keyword[return] identifier[pd] . identifier[Series] ( identifier[peak_generation] ) | def peak_generation_per_technology(self):
"""
Peak generation of each technology in the grid
Returns
-------
:pandas:`pandas.Series<series>`
Peak generation index by technology
"""
peak_generation = defaultdict(float)
for gen in self.generators:
peak_generation[gen.type] += gen.nominal_capacity # depends on [control=['for'], data=['gen']]
return pd.Series(peak_generation) |
def roc_auc(model, X, y=None, ax=None, **kwargs):
"""ROCAUC Quick method:
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
model : the Scikit-Learn estimator
Should be an instance of a classifier, else the __init__ will
return an error.
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : the axis to plot the figure on.
classes : list
A list of class names for the legend. If classes is None and a y value
is passed to fit then the classes are selected from the target vector.
Note that the curves must be computed based on what is in the target
vector passed to the ``score()`` method. Class names are used for
labeling only and must be in the correct order to prevent confusion.
micro : bool, default = True
Plot the micro-averages ROC curve, computed from the sum of all true
positives and false positives across all classes. Micro is not defined
for binary classification problems with estimators with only a
decision_function method.
macro : bool, default = True
Plot the macro-averages ROC curve, which simply takes the average of
curves across all classes. Macro is not defined for binary
classification problems with estimators with only a decision_function
method.
per_class : bool, default = True
Plot the ROC curves for each individual class. This should be set
to false if only the macro or micro average curves are required. Per-
class classification is not defined for binary classification problems
with estimators with only a decision_function method.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> data = load_data("occupancy")
>>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
>>> X = data[features].values
>>> y = data.occupancy.values
>>> roc_auc(LogisticRegression(), X, y)
Returns
-------
ax : matplotlib axes
Returns the axes that the roc-auc curve was drawn on.
"""
# Instantiate the visualizer
visualizer = ROCAUC(model, ax, **kwargs)
# Create the train and test splits
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X_train, y_train, **kwargs)
visualizer.score(X_test, y_test)
visualizer.finalize()
# Return the axes object on the visualizer
return visualizer.ax | def function[roc_auc, parameter[model, X, y, ax]]:
constant[ROCAUC Quick method:
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
model : the Scikit-Learn estimator
Should be an instance of a classifier, else the __init__ will
return an error.
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : the axis to plot the figure on.
classes : list
A list of class names for the legend. If classes is None and a y value
is passed to fit then the classes are selected from the target vector.
Note that the curves must be computed based on what is in the target
vector passed to the ``score()`` method. Class names are used for
labeling only and must be in the correct order to prevent confusion.
micro : bool, default = True
Plot the micro-averages ROC curve, computed from the sum of all true
positives and false positives across all classes. Micro is not defined
for binary classification problems with estimators with only a
decision_function method.
macro : bool, default = True
Plot the macro-averages ROC curve, which simply takes the average of
curves across all classes. Macro is not defined for binary
classification problems with estimators with only a decision_function
method.
per_class : bool, default = True
Plot the ROC curves for each individual class. This should be set
to false if only the macro or micro average curves are required. Per-
class classification is not defined for binary classification problems
with estimators with only a decision_function method.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> data = load_data("occupancy")
>>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
>>> X = data[features].values
>>> y = data.occupancy.values
>>> roc_auc(LogisticRegression(), X, y)
Returns
-------
ax : matplotlib axes
Returns the axes that the roc-auc curve was drawn on.
]
variable[visualizer] assign[=] call[name[ROCAUC], parameter[name[model], name[ax]]]
<ast.Tuple object at 0x7da20c6aa350> assign[=] call[name[train_test_split], parameter[name[X], name[y]]]
call[name[visualizer].fit, parameter[name[X_train], name[y_train]]]
call[name[visualizer].score, parameter[name[X_test], name[y_test]]]
call[name[visualizer].finalize, parameter[]]
return[name[visualizer].ax] | keyword[def] identifier[roc_auc] ( identifier[model] , identifier[X] , identifier[y] = keyword[None] , identifier[ax] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[visualizer] = identifier[ROCAUC] ( identifier[model] , identifier[ax] ,** identifier[kwargs] )
identifier[X_train] , identifier[X_test] , identifier[y_train] , identifier[y_test] = identifier[train_test_split] ( identifier[X] , identifier[y] , identifier[test_size] = literal[int] )
identifier[visualizer] . identifier[fit] ( identifier[X_train] , identifier[y_train] ,** identifier[kwargs] )
identifier[visualizer] . identifier[score] ( identifier[X_test] , identifier[y_test] )
identifier[visualizer] . identifier[finalize] ()
keyword[return] identifier[visualizer] . identifier[ax] | def roc_auc(model, X, y=None, ax=None, **kwargs):
"""ROCAUC Quick method:
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
model : the Scikit-Learn estimator
Should be an instance of a classifier, else the __init__ will
return an error.
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : the axis to plot the figure on.
classes : list
A list of class names for the legend. If classes is None and a y value
is passed to fit then the classes are selected from the target vector.
Note that the curves must be computed based on what is in the target
vector passed to the ``score()`` method. Class names are used for
labeling only and must be in the correct order to prevent confusion.
micro : bool, default = True
Plot the micro-averages ROC curve, computed from the sum of all true
positives and false positives across all classes. Micro is not defined
for binary classification problems with estimators with only a
decision_function method.
macro : bool, default = True
Plot the macro-averages ROC curve, which simply takes the average of
curves across all classes. Macro is not defined for binary
classification problems with estimators with only a decision_function
method.
per_class : bool, default = True
Plot the ROC curves for each individual class. This should be set
to false if only the macro or micro average curves are required. Per-
class classification is not defined for binary classification problems
with estimators with only a decision_function method.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> data = load_data("occupancy")
>>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
>>> X = data[features].values
>>> y = data.occupancy.values
>>> roc_auc(LogisticRegression(), X, y)
Returns
-------
ax : matplotlib axes
Returns the axes that the roc-auc curve was drawn on.
"""
# Instantiate the visualizer
visualizer = ROCAUC(model, ax, **kwargs)
# Create the train and test splits
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X_train, y_train, **kwargs)
visualizer.score(X_test, y_test)
visualizer.finalize()
# Return the axes object on the visualizer
return visualizer.ax |
def keep(self, diff):
    """Mark this diff (or volume) to be kept in path.

    Both endpoints of *diff* are flagged: the destination volume and the
    source volume it was built from.
    """
    for volume in (diff.toVol, diff.fromVol):
        self._keepVol(volume)
constant[ Mark this diff (or volume) to be kept in path. ]
call[name[self]._keepVol, parameter[name[diff].toVol]]
call[name[self]._keepVol, parameter[name[diff].fromVol]] | keyword[def] identifier[keep] ( identifier[self] , identifier[diff] ):
literal[string]
identifier[self] . identifier[_keepVol] ( identifier[diff] . identifier[toVol] )
identifier[self] . identifier[_keepVol] ( identifier[diff] . identifier[fromVol] ) | def keep(self, diff):
""" Mark this diff (or volume) to be kept in path. """
self._keepVol(diff.toVol)
self._keepVol(diff.fromVol) |
def _init_object(self, catalog_id, proxy, runtime, cat_name, cat_class):
    """Initialize this object as an OsidObject....do we need this??
    From the Mongo learning impl, but seems unnecessary for Handcar"""
    # NOTE(review): catalog_id and cat_name are accepted but never read in
    # this body — presumably kept for signature parity with the Mongo
    # implementation; confirm before removing.
    self._catalog_identifier = None
    self._init_proxy_and_runtime(proxy, runtime)
    # Wrap the raw catalog map in the concrete catalog class.
    # Assumes self._my_catalog_map was populated earlier — TODO confirm.
    self._catalog = cat_class(self._my_catalog_map)
    # Propagate this object's authority onto the freshly built catalog
    # before asking it for its id below.
    self._catalog._authority = self._authority  # there should be a better way...
    self._catalog_id = self._catalog.get_id()
    # Starts empty; presumably filled with form objects as they are
    # created through this object — verify against callers.
    self._forms = dict()
constant[Initialize this object as an OsidObject....do we need this??
From the Mongo learning impl, but seems unnecessary for Handcar]
name[self]._catalog_identifier assign[=] constant[None]
call[name[self]._init_proxy_and_runtime, parameter[name[proxy], name[runtime]]]
name[self]._catalog assign[=] call[name[cat_class], parameter[name[self]._my_catalog_map]]
name[self]._catalog._authority assign[=] name[self]._authority
name[self]._catalog_id assign[=] call[name[self]._catalog.get_id, parameter[]]
name[self]._forms assign[=] call[name[dict], parameter[]] | keyword[def] identifier[_init_object] ( identifier[self] , identifier[catalog_id] , identifier[proxy] , identifier[runtime] , identifier[cat_name] , identifier[cat_class] ):
literal[string]
identifier[self] . identifier[_catalog_identifier] = keyword[None]
identifier[self] . identifier[_init_proxy_and_runtime] ( identifier[proxy] , identifier[runtime] )
identifier[self] . identifier[_catalog] = identifier[cat_class] ( identifier[self] . identifier[_my_catalog_map] )
identifier[self] . identifier[_catalog] . identifier[_authority] = identifier[self] . identifier[_authority]
identifier[self] . identifier[_catalog_id] = identifier[self] . identifier[_catalog] . identifier[get_id] ()
identifier[self] . identifier[_forms] = identifier[dict] () | def _init_object(self, catalog_id, proxy, runtime, cat_name, cat_class):
"""Initialize this object as an OsidObject....do we need this??
From the Mongo learning impl, but seems unnecessary for Handcar"""
self._catalog_identifier = None
self._init_proxy_and_runtime(proxy, runtime)
self._catalog = cat_class(self._my_catalog_map)
self._catalog._authority = self._authority # there should be a better way...
self._catalog_id = self._catalog.get_id()
self._forms = dict() |
def _locals(self, d):
    """Return a new :class:`.Context` whose state is a copy of this
    one, overlaid with the entries of ``d``.

    When ``d`` is empty or falsy, ``self`` is returned unchanged.
    The :attr:`.Context.kwargs` collection remains unaffected.
    """
    if d:
        copied = self._copy()
        copied._data.update(d)
        return copied
    return self
constant[Create a new :class:`.Context` with a copy of this
:class:`.Context`'s current state,
updated with the given dictionary.
The :attr:`.Context.kwargs` collection remains
unaffected.
]
if <ast.UnaryOp object at 0x7da1b1d5c880> begin[:]
return[name[self]]
variable[c] assign[=] call[name[self]._copy, parameter[]]
call[name[c]._data.update, parameter[name[d]]]
return[name[c]] | keyword[def] identifier[_locals] ( identifier[self] , identifier[d] ):
literal[string]
keyword[if] keyword[not] identifier[d] :
keyword[return] identifier[self]
identifier[c] = identifier[self] . identifier[_copy] ()
identifier[c] . identifier[_data] . identifier[update] ( identifier[d] )
keyword[return] identifier[c] | def _locals(self, d):
"""Create a new :class:`.Context` with a copy of this
:class:`.Context`'s current state,
updated with the given dictionary.
The :attr:`.Context.kwargs` collection remains
unaffected.
"""
if not d:
return self # depends on [control=['if'], data=[]]
c = self._copy()
c._data.update(d)
return c |
def join_tags(tags):
    """
    Build the editable string form of a list of ``Tag`` instances, such
    that submitting the string back unchanged parses to the same tags.

    Tag names containing the configured delimiter (or a space) are wrapped
    in double quotes so they survive the round trip.

    Adapted from Taggit's _edit_string_for_tags(); originally ported from
    Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    delimiter = settings.TAGGIT_SELECTIZE['DELIMITER']

    def _quote(name):
        # Quote names that would otherwise be split when parsed back.
        if delimiter in name or ' ' in name:
            return '"%s"' % name
        return name

    return delimiter.join(sorted(_quote(tag.name) for tag in tags))
constant[
Given list of ``Tag`` instances, creates a string representation of
the list suitable for editing by the user, such that submitting the
given string representation back without changing it will give the
same list of tags.
Tag names which contain DELIMITER will be double quoted.
Adapted from Taggit's _edit_string_for_tags()
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
]
variable[names] assign[=] list[[]]
variable[delimiter] assign[=] call[name[settings].TAGGIT_SELECTIZE][constant[DELIMITER]]
for taget[name[tag]] in starred[name[tags]] begin[:]
variable[name] assign[=] name[tag].name
if <ast.BoolOp object at 0x7da18ede4820> begin[:]
call[name[names].append, parameter[binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[name]]]]
return[call[name[delimiter].join, parameter[call[name[sorted], parameter[name[names]]]]]] | keyword[def] identifier[join_tags] ( identifier[tags] ):
literal[string]
identifier[names] =[]
identifier[delimiter] = identifier[settings] . identifier[TAGGIT_SELECTIZE] [ literal[string] ]
keyword[for] identifier[tag] keyword[in] identifier[tags] :
identifier[name] = identifier[tag] . identifier[name]
keyword[if] identifier[delimiter] keyword[in] identifier[name] keyword[or] literal[string] keyword[in] identifier[name] :
identifier[names] . identifier[append] ( literal[string] % identifier[name] )
keyword[else] :
identifier[names] . identifier[append] ( identifier[name] )
keyword[return] identifier[delimiter] . identifier[join] ( identifier[sorted] ( identifier[names] )) | def join_tags(tags):
"""
Given list of ``Tag`` instances, creates a string representation of
the list suitable for editing by the user, such that submitting the
given string representation back without changing it will give the
same list of tags.
Tag names which contain DELIMITER will be double quoted.
Adapted from Taggit's _edit_string_for_tags()
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
names = []
delimiter = settings.TAGGIT_SELECTIZE['DELIMITER']
for tag in tags:
name = tag.name
if delimiter in name or ' ' in name:
names.append('"%s"' % name) # depends on [control=['if'], data=[]]
else:
names.append(name) # depends on [control=['for'], data=['tag']]
return delimiter.join(sorted(names)) |
def _cache_get_entry(self, entry_name, key=ENTIRE_ENTRY_KEY, default=False):
    """Fetch a value from the cache entry registered under ``entry_name``.

    :param str entry_name: name the cache entry is stored under
    :param str key: parameter key inside the entry; when it is the
        ``ENTIRE_ENTRY_KEY`` sentinel, the whole entry mapping is returned
    :param type default: value returned when ``key`` is absent
    :return: the entire entry, the parameter value, or ``default``
    """
    entry = self._cache[entry_name]
    if key is not self.ENTIRE_ENTRY_KEY:
        return entry.get(key, default)
    return entry
constant[Returns cache entry parameter value by its name.
:param str entry_name:
:param str key:
:param type default:
:return:
]
if compare[name[key] is name[self].ENTIRE_ENTRY_KEY] begin[:]
return[call[name[self]._cache][name[entry_name]]]
return[call[call[name[self]._cache][name[entry_name]].get, parameter[name[key], name[default]]]] | keyword[def] identifier[_cache_get_entry] ( identifier[self] , identifier[entry_name] , identifier[key] = identifier[ENTIRE_ENTRY_KEY] , identifier[default] = keyword[False] ):
literal[string]
keyword[if] identifier[key] keyword[is] identifier[self] . identifier[ENTIRE_ENTRY_KEY] :
keyword[return] identifier[self] . identifier[_cache] [ identifier[entry_name] ]
keyword[return] identifier[self] . identifier[_cache] [ identifier[entry_name] ]. identifier[get] ( identifier[key] , identifier[default] ) | def _cache_get_entry(self, entry_name, key=ENTIRE_ENTRY_KEY, default=False):
"""Returns cache entry parameter value by its name.
:param str entry_name:
:param str key:
:param type default:
:return:
"""
if key is self.ENTIRE_ENTRY_KEY:
return self._cache[entry_name] # depends on [control=['if'], data=[]]
return self._cache[entry_name].get(key, default) |
def _remove_proc_file(path):
    '''
    Best-effort removal of a stale or invalid proc file.

    Removal failures are non-fatal and only logged at debug level,
    matching the historical inline behavior.
    '''
    try:
        os.remove(path)
    except IOError:
        log.debug('Unable to remove proc file %s.', path)


def _read_proc_file(path, opts):
    '''
    Return a dict of JID metadata, or None

    A ``None`` return means the proc file at ``path`` is empty, stale,
    malformed, or describes the current process/thread itself; in the
    stale and malformed cases the file is removed as a side effect.

    :param str path: path of the proc file to read
    :param dict opts: opts used for payload deserialization and to decide
        between multiprocessing and threaded bookkeeping
    '''
    serial = salt.payload.Serial(opts)
    current_thread = threading.currentThread().name
    pid = os.getpid()
    with salt.utils.files.fopen(path, 'rb') as fp_:
        buf = fp_.read()
        if not buf:
            # Proc file is empty: clean it up and signal "no metadata".
            _remove_proc_file(path)
            return None
        data = serial.loads(buf)
    if not isinstance(data, dict):
        # Invalid serial object
        return None
    if not salt.utils.process.os_is_running(data['pid']):
        # The process is no longer running, clear out the file and
        # continue
        _remove_proc_file(path)
        return None
    if opts.get('multiprocessing'):
        # One proc file per worker process: skip the one describing us.
        if data.get('pid') == pid:
            return None
    else:
        # Threaded mode: a different pid is a leftover from a dead
        # process; a jid matching this thread's name is our own file.
        if data.get('pid') != pid:
            _remove_proc_file(path)
            return None
        if data.get('jid') == current_thread:
            return None
        if not data.get('jid') in [x.name for x in threading.enumerate()]:
            # No live thread carries this jid any more.
            _remove_proc_file(path)
            return None
    if not _check_cmdline(data):
        # The pid exists but its cmdline does not look like salt, so the
        # pid was likely recycled by an unrelated program.
        pid = data.get('pid')
        if pid:
            log.warning(
                'PID %s exists but does not appear to be a salt process.', pid
            )
        _remove_proc_file(path)
        return None
    return data
constant[
Return a dict of JID metadata, or None
]
variable[serial] assign[=] call[name[salt].payload.Serial, parameter[name[opts]]]
variable[current_thread] assign[=] call[name[threading].currentThread, parameter[]].name
variable[pid] assign[=] call[name[os].getpid, parameter[]]
with call[name[salt].utils.files.fopen, parameter[name[path], constant[rb]]] begin[:]
variable[buf] assign[=] call[name[fp_].read, parameter[]]
call[name[fp_].close, parameter[]]
if name[buf] begin[:]
variable[data] assign[=] call[name[serial].loads, parameter[name[buf]]]
if <ast.UnaryOp object at 0x7da1b2027520> begin[:]
return[constant[None]]
if <ast.UnaryOp object at 0x7da1b2024d30> begin[:]
<ast.Try object at 0x7da1b2026620>
return[constant[None]]
if call[name[opts].get, parameter[constant[multiprocessing]]] begin[:]
if compare[call[name[data].get, parameter[constant[pid]]] equal[==] name[pid]] begin[:]
return[constant[None]]
if <ast.UnaryOp object at 0x7da1b1c16320> begin[:]
variable[pid] assign[=] call[name[data].get, parameter[constant[pid]]]
if name[pid] begin[:]
call[name[log].warning, parameter[constant[PID %s exists but does not appear to be a salt process.], name[pid]]]
<ast.Try object at 0x7da1b1c20f70>
return[constant[None]]
return[name[data]] | keyword[def] identifier[_read_proc_file] ( identifier[path] , identifier[opts] ):
literal[string]
identifier[serial] = identifier[salt] . identifier[payload] . identifier[Serial] ( identifier[opts] )
identifier[current_thread] = identifier[threading] . identifier[currentThread] (). identifier[name]
identifier[pid] = identifier[os] . identifier[getpid] ()
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[path] , literal[string] ) keyword[as] identifier[fp_] :
identifier[buf] = identifier[fp_] . identifier[read] ()
identifier[fp_] . identifier[close] ()
keyword[if] identifier[buf] :
identifier[data] = identifier[serial] . identifier[loads] ( identifier[buf] )
keyword[else] :
keyword[try] :
identifier[os] . identifier[remove] ( identifier[path] )
keyword[except] identifier[IOError] :
identifier[log] . identifier[debug] ( literal[string] , identifier[path] )
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[salt] . identifier[utils] . identifier[process] . identifier[os_is_running] ( identifier[data] [ literal[string] ]):
keyword[try] :
identifier[os] . identifier[remove] ( identifier[path] )
keyword[except] identifier[IOError] :
identifier[log] . identifier[debug] ( literal[string] , identifier[path] )
keyword[return] keyword[None]
keyword[if] identifier[opts] . identifier[get] ( literal[string] ):
keyword[if] identifier[data] . identifier[get] ( literal[string] )== identifier[pid] :
keyword[return] keyword[None]
keyword[else] :
keyword[if] identifier[data] . identifier[get] ( literal[string] )!= identifier[pid] :
keyword[try] :
identifier[os] . identifier[remove] ( identifier[path] )
keyword[except] identifier[IOError] :
identifier[log] . identifier[debug] ( literal[string] , identifier[path] )
keyword[return] keyword[None]
keyword[if] identifier[data] . identifier[get] ( literal[string] )== identifier[current_thread] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[data] . identifier[get] ( literal[string] ) keyword[in] [ identifier[x] . identifier[name] keyword[for] identifier[x] keyword[in] identifier[threading] . identifier[enumerate] ()]:
keyword[try] :
identifier[os] . identifier[remove] ( identifier[path] )
keyword[except] identifier[IOError] :
identifier[log] . identifier[debug] ( literal[string] , identifier[path] )
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[_check_cmdline] ( identifier[data] ):
identifier[pid] = identifier[data] . identifier[get] ( literal[string] )
keyword[if] identifier[pid] :
identifier[log] . identifier[warning] (
literal[string] , identifier[pid]
)
keyword[try] :
identifier[os] . identifier[remove] ( identifier[path] )
keyword[except] identifier[IOError] :
identifier[log] . identifier[debug] ( literal[string] , identifier[path] )
keyword[return] keyword[None]
keyword[return] identifier[data] | def _read_proc_file(path, opts):
"""
Return a dict of JID metadata, or None
"""
serial = salt.payload.Serial(opts)
current_thread = threading.currentThread().name
pid = os.getpid()
with salt.utils.files.fopen(path, 'rb') as fp_:
buf = fp_.read()
fp_.close()
if buf:
data = serial.loads(buf) # depends on [control=['if'], data=[]]
else:
# Proc file is empty, remove
try:
os.remove(path) # depends on [control=['try'], data=[]]
except IOError:
log.debug('Unable to remove proc file %s.', path) # depends on [control=['except'], data=[]]
return None # depends on [control=['with'], data=['fp_']]
if not isinstance(data, dict):
# Invalid serial object
return None # depends on [control=['if'], data=[]]
if not salt.utils.process.os_is_running(data['pid']):
# The process is no longer running, clear out the file and
# continue
try:
os.remove(path) # depends on [control=['try'], data=[]]
except IOError:
log.debug('Unable to remove proc file %s.', path) # depends on [control=['except'], data=[]]
return None # depends on [control=['if'], data=[]]
if opts.get('multiprocessing'):
if data.get('pid') == pid:
return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if data.get('pid') != pid:
try:
os.remove(path) # depends on [control=['try'], data=[]]
except IOError:
log.debug('Unable to remove proc file %s.', path) # depends on [control=['except'], data=[]]
return None # depends on [control=['if'], data=[]]
if data.get('jid') == current_thread:
return None # depends on [control=['if'], data=[]]
if not data.get('jid') in [x.name for x in threading.enumerate()]:
try:
os.remove(path) # depends on [control=['try'], data=[]]
except IOError:
log.debug('Unable to remove proc file %s.', path) # depends on [control=['except'], data=[]]
return None # depends on [control=['if'], data=[]]
if not _check_cmdline(data):
pid = data.get('pid')
if pid:
log.warning('PID %s exists but does not appear to be a salt process.', pid) # depends on [control=['if'], data=[]]
try:
os.remove(path) # depends on [control=['try'], data=[]]
except IOError:
log.debug('Unable to remove proc file %s.', path) # depends on [control=['except'], data=[]]
return None # depends on [control=['if'], data=[]]
return data |
def get_egrc_config(cli_egrc_path):
    """
    Return a Config namedtuple based on the contents of the egrc.

    The egrc is looked for first at ``cli_egrc_path`` (the value given on
    the command line via --config-file) and then at the default location.
    When no egrc file exists, an empty default Config is returned.
    """
    egrc_path = get_expanded_path(
        get_priority(cli_egrc_path, DEFAULT_EGRC_PATH, None)
    )
    if os.path.isfile(egrc_path):
        return get_config_tuple_from_egrc(egrc_path)
    # No egrc anywhere: behave as if nothing was configured.
    return get_empty_config()
constant[
Return a Config namedtuple based on the contents of the egrc.
If the egrc is not present, it returns an empty default Config.
This method tries to use the egrc at cli_egrc_path, then the default path.
cli_egrc_path: the path to the egrc as given on the command line via
--config-file
]
variable[resolved_path] assign[=] call[name[get_priority], parameter[name[cli_egrc_path], name[DEFAULT_EGRC_PATH], constant[None]]]
variable[expanded_path] assign[=] call[name[get_expanded_path], parameter[name[resolved_path]]]
variable[egrc_config] assign[=] call[name[get_empty_config], parameter[]]
if call[name[os].path.isfile, parameter[name[expanded_path]]] begin[:]
variable[egrc_config] assign[=] call[name[get_config_tuple_from_egrc], parameter[name[expanded_path]]]
return[name[egrc_config]] | keyword[def] identifier[get_egrc_config] ( identifier[cli_egrc_path] ):
literal[string]
identifier[resolved_path] = identifier[get_priority] ( identifier[cli_egrc_path] , identifier[DEFAULT_EGRC_PATH] , keyword[None] )
identifier[expanded_path] = identifier[get_expanded_path] ( identifier[resolved_path] )
identifier[egrc_config] = identifier[get_empty_config] ()
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[expanded_path] ):
identifier[egrc_config] = identifier[get_config_tuple_from_egrc] ( identifier[expanded_path] )
keyword[return] identifier[egrc_config] | def get_egrc_config(cli_egrc_path):
"""
Return a Config namedtuple based on the contents of the egrc.
If the egrc is not present, it returns an empty default Config.
This method tries to use the egrc at cli_egrc_path, then the default path.
cli_egrc_path: the path to the egrc as given on the command line via
--config-file
"""
resolved_path = get_priority(cli_egrc_path, DEFAULT_EGRC_PATH, None)
expanded_path = get_expanded_path(resolved_path)
# Start as if nothing was defined in the egrc.
egrc_config = get_empty_config()
if os.path.isfile(expanded_path):
egrc_config = get_config_tuple_from_egrc(expanded_path) # depends on [control=['if'], data=[]]
return egrc_config |
def autofit(ts, maxp=5, maxd=2, maxq=5, sc=None):
    """
    Utility function to help in fitting an automatically selected ARIMA model based on approximate
    Akaike Information Criterion (AIC) values. The model search is based on the heuristic
    developed by Hyndman and Khandakar (2008) and described in [[http://www.jstatsoft
    .org/v27/i03/paper]]. In contrast to the algorithm in the paper, we use an approximation to
    the AIC, rather than an exact value. Note that if the maximum differencing order provided
    does not suffice to induce stationarity, the function returns a failure, with the appropriate
    message. Additionally, note that the heuristic only considers models that have parameters
    satisfying the stationarity/invertibility constraints. Finally, note that our algorithm is
    slightly more lenient than the original heuristic. For example, the original heuristic
    rejects models with parameters "close" to violating stationarity/invertibility. We only
    reject those that actually violate it.
    This functionality is even less mature than some of the other model fitting functions here, so
    use it with caution.
    Parameters
    ----------
    ts:
        time series to which to automatically fit an ARIMA model as a Numpy array
    maxp:
        limit for the AR order
    maxd:
        limit for differencing order
    maxq:
        limit for the MA order
    sc:
        The SparkContext, required.
    Raises
    ------
    ValueError
        If ``sc`` is not provided.
    returns an ARIMAModel
    """
    # Validate explicitly instead of `assert`: asserts are stripped under
    # `python -O`, which would let a None SparkContext through.
    if sc is None:
        raise ValueError("Missing SparkContext")
    jmodel = sc._jvm.com.cloudera.sparkts.models.ARIMA.autoFit(
        _py2java(sc, Vectors.dense(ts)), maxp, maxd, maxq)
    return ARIMAModel(jmodel=jmodel, sc=sc)
constant[
Utility function to help in fitting an automatically selected ARIMA model based on approximate
Akaike Information Criterion (AIC) values. The model search is based on the heuristic
developed by Hyndman and Khandakar (2008) and described in [[http://www.jstatsoft
.org/v27/i03/paper]]. In contrast to the algorithm in the paper, we use an approximation to
the AIC, rather than an exact value. Note that if the maximum differencing order provided
does not suffice to induce stationarity, the function returns a failure, with the appropriate
message. Additionally, note that the heuristic only considers models that have parameters
satisfying the stationarity/invertibility constraints. Finally, note that our algorithm is
slightly more lenient than the original heuristic. For example, the original heuristic
rejects models with parameters "close" to violating stationarity/invertibility. We only
reject those that actually violate it.
This functionality is even less mature than some of the other model fitting functions here, so
use it with caution.
Parameters
----------
ts:
time series to which to automatically fit an ARIMA model as a Numpy array
maxP:
limit for the AR order
maxD:
limit for differencing order
maxQ:
limit for the MA order
sc:
The SparkContext, required.
returns an ARIMAModel
]
assert[compare[name[sc] not_equal[!=] constant[None]]]
variable[jmodel] assign[=] call[name[sc]._jvm.com.cloudera.sparkts.models.ARIMA.autoFit, parameter[call[name[_py2java], parameter[name[sc], call[name[Vectors].dense, parameter[name[ts]]]]], name[maxp], name[maxd], name[maxq]]]
return[call[name[ARIMAModel], parameter[]]] | keyword[def] identifier[autofit] ( identifier[ts] , identifier[maxp] = literal[int] , identifier[maxd] = literal[int] , identifier[maxq] = literal[int] , identifier[sc] = keyword[None] ):
literal[string]
keyword[assert] identifier[sc] != keyword[None] , literal[string]
identifier[jmodel] = identifier[sc] . identifier[_jvm] . identifier[com] . identifier[cloudera] . identifier[sparkts] . identifier[models] . identifier[ARIMA] . identifier[autoFit] ( identifier[_py2java] ( identifier[sc] , identifier[Vectors] . identifier[dense] ( identifier[ts] )), identifier[maxp] , identifier[maxd] , identifier[maxq] )
keyword[return] identifier[ARIMAModel] ( identifier[jmodel] = identifier[jmodel] , identifier[sc] = identifier[sc] ) | def autofit(ts, maxp=5, maxd=2, maxq=5, sc=None):
"""
Utility function to help in fitting an automatically selected ARIMA model based on approximate
Akaike Information Criterion (AIC) values. The model search is based on the heuristic
developed by Hyndman and Khandakar (2008) and described in [[http://www.jstatsoft
.org/v27/i03/paper]]. In contrast to the algorithm in the paper, we use an approximation to
the AIC, rather than an exact value. Note that if the maximum differencing order provided
does not suffice to induce stationarity, the function returns a failure, with the appropriate
message. Additionally, note that the heuristic only considers models that have parameters
satisfying the stationarity/invertibility constraints. Finally, note that our algorithm is
slightly more lenient than the original heuristic. For example, the original heuristic
rejects models with parameters "close" to violating stationarity/invertibility. We only
reject those that actually violate it.
This functionality is even less mature than some of the other model fitting functions here, so
use it with caution.
Parameters
----------
ts:
time series to which to automatically fit an ARIMA model as a Numpy array
maxP:
limit for the AR order
maxD:
limit for differencing order
maxQ:
limit for the MA order
sc:
The SparkContext, required.
returns an ARIMAModel
"""
assert sc != None, 'Missing SparkContext'
jmodel = sc._jvm.com.cloudera.sparkts.models.ARIMA.autoFit(_py2java(sc, Vectors.dense(ts)), maxp, maxd, maxq)
return ARIMAModel(jmodel=jmodel, sc=sc) |
def dump_csv(data: List[dict], fieldnames: Sequence[str], with_header: bool = False, crlf: bool = False,
             tsv: bool = False) -> str:
    """Serialize a list of row dicts into a CSV (or TSV) string.

    :param data: rows to serialize
    :param fieldnames: column order; keys not listed here are ignored
    :param with_header: prepend a header row when True
    :param crlf: use CRLF line endings when True
    :param tsv: use tab separators when True
    :return: unicode
    """
    buf = io.StringIO()
    writer = csv.DictWriter(
        buf,
        fieldnames=fieldnames,
        dialect=get_dialect_name(crlf, tsv),
        extrasaction='ignore',
    )
    if with_header:
        writer.writeheader()
    for row in data:
        # XXX: Double quotation behaves strangely... so replace (why?)
        writer.writerow({
            key: dump_json(value).replace('"', "'") if isinstance(value, (dict, list)) else value
            for key, value in row.items()
        })
    return buf.getvalue()
constant[
:param data:
:param fieldnames:
:param with_header:
:param crlf:
:param tsv:
:return: unicode
]
def function[force_str, parameter[v]]:
return[<ast.IfExp object at 0x7da18c4cead0>]
with call[name[io].StringIO, parameter[]] begin[:]
variable[dialect] assign[=] call[name[get_dialect_name], parameter[name[crlf], name[tsv]]]
variable[writer] assign[=] call[name[csv].DictWriter, parameter[name[sio]]]
if name[with_header] begin[:]
call[name[writer].writeheader, parameter[]]
for taget[name[x]] in starred[name[data]] begin[:]
call[name[writer].writerow, parameter[<ast.DictComp object at 0x7da1b1bfb700>]]
call[name[sio].seek, parameter[constant[0]]]
return[call[name[sio].read, parameter[]]] | keyword[def] identifier[dump_csv] ( identifier[data] : identifier[List] [ identifier[dict] ], identifier[fieldnames] : identifier[Sequence] [ identifier[str] ], identifier[with_header] : identifier[bool] = keyword[False] , identifier[crlf] : identifier[bool] = keyword[False] ,
identifier[tsv] : identifier[bool] = keyword[False] )-> identifier[str] :
literal[string]
keyword[def] identifier[force_str] ( identifier[v] ):
keyword[return] identifier[dump_json] ( identifier[v] ). identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[isinstance] ( identifier[v] ,( identifier[dict] , identifier[list] )) keyword[else] identifier[v]
keyword[with] identifier[io] . identifier[StringIO] () keyword[as] identifier[sio] :
identifier[dialect] = identifier[get_dialect_name] ( identifier[crlf] , identifier[tsv] )
identifier[writer] = identifier[csv] . identifier[DictWriter] ( identifier[sio] , identifier[fieldnames] = identifier[fieldnames] , identifier[dialect] = identifier[dialect] , identifier[extrasaction] = literal[string] )
keyword[if] identifier[with_header] :
identifier[writer] . identifier[writeheader] ()
keyword[for] identifier[x] keyword[in] identifier[data] :
identifier[writer] . identifier[writerow] ({ identifier[k] : identifier[force_str] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[x] . identifier[items] ()})
identifier[sio] . identifier[seek] ( literal[int] )
keyword[return] identifier[sio] . identifier[read] () | def dump_csv(data: List[dict], fieldnames: Sequence[str], with_header: bool=False, crlf: bool=False, tsv: bool=False) -> str:
"""
:param data:
:param fieldnames:
:param with_header:
:param crlf:
:param tsv:
:return: unicode
"""
def force_str(v):
# XXX: Double quotation behaves strangely... so replace (why?)
return dump_json(v).replace('"', "'") if isinstance(v, (dict, list)) else v
with io.StringIO() as sio:
dialect = get_dialect_name(crlf, tsv)
writer = csv.DictWriter(sio, fieldnames=fieldnames, dialect=dialect, extrasaction='ignore')
if with_header:
writer.writeheader() # depends on [control=['if'], data=[]]
for x in data:
writer.writerow({k: force_str(v) for (k, v) in x.items()}) # depends on [control=['for'], data=['x']]
sio.seek(0)
return sio.read() # depends on [control=['with'], data=['sio']] |
def benchmark(N, gates):
    """Create and run a circuit with N qubits and given number of gates"""
    qubit_indices = list(range(N))
    state = qf.zero_state(N)
    # Put every qubit into superposition first (one H gate per qubit).
    for idx in range(N):
        state = qf.H(idx).run(state)
    # Spend the remaining gate budget in X/T/CNOT triples on random qubit pairs.
    remaining_triples = (gates - N) // 3
    for _ in range(remaining_triples):
        q0, q1 = random.sample(qubit_indices, 2)
        state = qf.X(q0).run(state)
        state = qf.T(q1).run(state)
        state = qf.CNOT(q0, q1).run(state)
    return state.vec.tensor
constant[Create and run a circuit with N qubits and given number of gates]
variable[qubits] assign[=] call[name[list], parameter[call[name[range], parameter[constant[0], name[N]]]]]
variable[ket] assign[=] call[name[qf].zero_state, parameter[name[N]]]
for taget[name[n]] in starred[call[name[range], parameter[constant[0], name[N]]]] begin[:]
variable[ket] assign[=] call[call[name[qf].H, parameter[name[n]]].run, parameter[name[ket]]]
for taget[name[_]] in starred[call[name[range], parameter[constant[0], binary_operation[binary_operation[name[gates] - name[N]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3]]]]] begin[:]
<ast.Tuple object at 0x7da18f723580> assign[=] call[name[random].sample, parameter[name[qubits], constant[2]]]
variable[ket] assign[=] call[call[name[qf].X, parameter[name[qubit0]]].run, parameter[name[ket]]]
variable[ket] assign[=] call[call[name[qf].T, parameter[name[qubit1]]].run, parameter[name[ket]]]
variable[ket] assign[=] call[call[name[qf].CNOT, parameter[name[qubit0], name[qubit1]]].run, parameter[name[ket]]]
return[name[ket].vec.tensor] | keyword[def] identifier[benchmark] ( identifier[N] , identifier[gates] ):
literal[string]
identifier[qubits] = identifier[list] ( identifier[range] ( literal[int] , identifier[N] ))
identifier[ket] = identifier[qf] . identifier[zero_state] ( identifier[N] )
keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[N] ):
identifier[ket] = identifier[qf] . identifier[H] ( identifier[n] ). identifier[run] ( identifier[ket] )
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] ,( identifier[gates] - identifier[N] )// literal[int] ):
identifier[qubit0] , identifier[qubit1] = identifier[random] . identifier[sample] ( identifier[qubits] , literal[int] )
identifier[ket] = identifier[qf] . identifier[X] ( identifier[qubit0] ). identifier[run] ( identifier[ket] )
identifier[ket] = identifier[qf] . identifier[T] ( identifier[qubit1] ). identifier[run] ( identifier[ket] )
identifier[ket] = identifier[qf] . identifier[CNOT] ( identifier[qubit0] , identifier[qubit1] ). identifier[run] ( identifier[ket] )
keyword[return] identifier[ket] . identifier[vec] . identifier[tensor] | def benchmark(N, gates):
"""Create and run a circuit with N qubits and given number of gates"""
qubits = list(range(0, N))
ket = qf.zero_state(N)
for n in range(0, N):
ket = qf.H(n).run(ket) # depends on [control=['for'], data=['n']]
for _ in range(0, (gates - N) // 3):
(qubit0, qubit1) = random.sample(qubits, 2)
ket = qf.X(qubit0).run(ket)
ket = qf.T(qubit1).run(ket)
ket = qf.CNOT(qubit0, qubit1).run(ket) # depends on [control=['for'], data=[]]
return ket.vec.tensor |
def check_online(stream):
    """
    Used to check user's online opponents and show their online/offline status on page on init
    """
    while True:
        packet = yield from stream.get()
        session_id = packet.get('session_key')
        opponent_username = packet.get('username')
        if not (session_id and opponent_username):
            # Malformed packet: missing session key or username.
            continue
        user_owner = get_user_from_session(session_id)
        if not user_owner:
            # Invalid session id.
            continue
        # Find all connections including user_owner as opponent
        online_opponents = [conn for conn in ws_connections if conn[1] == user_owner.username]
        logger.debug('User ' + user_owner.username + ' has ' + str(len(online_opponents)) + ' opponents online')
        # Send user online statuses of his opponents
        socket = ws_connections.get((user_owner.username, opponent_username))
        if not socket:
            # Socket for the pair user_owner.username, opponent_username not found;
            # this can be in case the user has already gone offline.
            continue
        online_opponents_usernames = [conn[0] for conn in online_opponents]
        yield from target_message(socket,
                                  {'type': 'gone-online', 'usernames': online_opponents_usernames})
constant[
Used to check user's online opponents and show their online/offline status on page on init
]
while constant[True] begin[:]
variable[packet] assign[=] <ast.YieldFrom object at 0x7da20e963b80>
variable[session_id] assign[=] call[name[packet].get, parameter[constant[session_key]]]
variable[opponent_username] assign[=] call[name[packet].get, parameter[constant[username]]]
if <ast.BoolOp object at 0x7da20e960f40> begin[:]
variable[user_owner] assign[=] call[name[get_user_from_session], parameter[name[session_id]]]
if name[user_owner] begin[:]
variable[online_opponents] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da20e960610>, name[ws_connections]]]]]
call[name[logger].debug, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[User ] + name[user_owner].username] + constant[ has ]] + call[name[str], parameter[call[name[len], parameter[name[online_opponents]]]]]] + constant[ opponents online]]]]
variable[socket] assign[=] call[name[ws_connections].get, parameter[tuple[[<ast.Attribute object at 0x7da18f00fb20>, <ast.Name object at 0x7da18f00f010>]]]]
if name[socket] begin[:]
variable[online_opponents_usernames] assign[=] <ast.ListComp object at 0x7da18f00c190>
<ast.YieldFrom object at 0x7da18f00f1c0> | keyword[def] identifier[check_online] ( identifier[stream] ):
literal[string]
keyword[while] keyword[True] :
identifier[packet] = keyword[yield] keyword[from] identifier[stream] . identifier[get] ()
identifier[session_id] = identifier[packet] . identifier[get] ( literal[string] )
identifier[opponent_username] = identifier[packet] . identifier[get] ( literal[string] )
keyword[if] identifier[session_id] keyword[and] identifier[opponent_username] :
identifier[user_owner] = identifier[get_user_from_session] ( identifier[session_id] )
keyword[if] identifier[user_owner] :
identifier[online_opponents] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]== identifier[user_owner] . identifier[username] , identifier[ws_connections] ))
identifier[logger] . identifier[debug] ( literal[string] + identifier[user_owner] . identifier[username] + literal[string] + identifier[str] ( identifier[len] ( identifier[online_opponents] ))+ literal[string] )
identifier[socket] = identifier[ws_connections] . identifier[get] (( identifier[user_owner] . identifier[username] , identifier[opponent_username] ))
keyword[if] identifier[socket] :
identifier[online_opponents_usernames] =[ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[online_opponents] ]
keyword[yield] keyword[from] identifier[target_message] ( identifier[socket] ,
{ literal[string] : literal[string] , literal[string] : identifier[online_opponents_usernames] })
keyword[else] :
keyword[pass]
keyword[else] :
keyword[pass]
keyword[else] :
keyword[pass] | def check_online(stream):
"""
Used to check user's online opponents and show their online/offline status on page on init
"""
while True:
packet = (yield from stream.get())
session_id = packet.get('session_key')
opponent_username = packet.get('username')
if session_id and opponent_username:
user_owner = get_user_from_session(session_id)
if user_owner: # Find all connections including user_owner as opponent
online_opponents = list(filter(lambda x: x[1] == user_owner.username, ws_connections))
logger.debug('User ' + user_owner.username + ' has ' + str(len(online_opponents)) + ' opponents online') # Send user online statuses of his opponents
socket = ws_connections.get((user_owner.username, opponent_username))
if socket:
online_opponents_usernames = [i[0] for i in online_opponents]
yield from target_message(socket, {'type': 'gone-online', 'usernames': online_opponents_usernames}) # depends on [control=['if'], data=[]]
else:
pass # socket for the pair user_owner.username, opponent_username not found # depends on [control=['if'], data=[]]
else: # this can be in case the user has already gone offline
pass # invalid session id # depends on [control=['if'], data=[]]
else:
pass # depends on [control=['while'], data=[]] |
def _try_cast(self, result, obj, numeric_only=False):
    """
    Try to cast the result to our obj original type,
    we may have roundtripped through object in the mean-time.
    If numeric_only is True, then only try to cast numerics
    and not datetimelikes.
    Parameters
    ----------
    result : array-like or scalar
        The aggregation/transform output to cast back.
    obj : pandas object
        The original object whose dtype should be recovered.
    numeric_only : bool, default False
        If True, only downcast when the target dtype is numeric.
    Returns
    -------
    The (possibly) recast ``result``; scalars are returned unchanged.
    """
    # 2-D objects expose the element dtype through ._values, not .dtype.
    if obj.ndim > 1:
        dtype = obj._values.dtype
    else:
        dtype = obj.dtype
    # Only array-likes are recast; scalar results pass through as-is.
    if not is_scalar(result):
        if is_datetime64tz_dtype(dtype):
            # GH 23683
            # Prior results _may_ have been generated in UTC.
            # Ensure we localize to UTC first before converting
            # to the target timezone
            try:
                result = obj._values._from_sequence(
                    result, dtype='datetime64[ns, UTC]'
                )
                result = result.astype(dtype)
            except TypeError:
                # _try_cast was called at a point where the result
                # was already tz-aware
                pass
        elif is_extension_array_dtype(dtype):
            # The function can return something of any type, so check
            # if the type is compatible with the calling EA.
            try:
                result = obj._values._from_sequence(result, dtype=dtype)
            except Exception:
                # https://github.com/pandas-dev/pandas/issues/22850
                # pandas has no control over what 3rd-party ExtensionArrays
                # do in _values_from_sequence. We still want ops to work
                # though, so we catch any regular Exception.
                pass
        elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
            # Precedence note: this reads as
            # (numeric_only and is_numeric_dtype(dtype)) or (not numeric_only),
            # i.e. when numeric_only is False the downcast is always attempted.
            result = maybe_downcast_to_dtype(result, dtype)
    return result
constant[
Try to cast the result to our obj original type,
we may have roundtripped through object in the mean-time.
If numeric_only is True, then only try to cast numerics
and not datetimelikes.
]
if compare[name[obj].ndim greater[>] constant[1]] begin[:]
variable[dtype] assign[=] name[obj]._values.dtype
if <ast.UnaryOp object at 0x7da204346800> begin[:]
if call[name[is_datetime64tz_dtype], parameter[name[dtype]]] begin[:]
<ast.Try object at 0x7da204344400>
return[name[result]] | keyword[def] identifier[_try_cast] ( identifier[self] , identifier[result] , identifier[obj] , identifier[numeric_only] = keyword[False] ):
literal[string]
keyword[if] identifier[obj] . identifier[ndim] > literal[int] :
identifier[dtype] = identifier[obj] . identifier[_values] . identifier[dtype]
keyword[else] :
identifier[dtype] = identifier[obj] . identifier[dtype]
keyword[if] keyword[not] identifier[is_scalar] ( identifier[result] ):
keyword[if] identifier[is_datetime64tz_dtype] ( identifier[dtype] ):
keyword[try] :
identifier[result] = identifier[obj] . identifier[_values] . identifier[_from_sequence] (
identifier[result] , identifier[dtype] = literal[string]
)
identifier[result] = identifier[result] . identifier[astype] ( identifier[dtype] )
keyword[except] identifier[TypeError] :
keyword[pass]
keyword[elif] identifier[is_extension_array_dtype] ( identifier[dtype] ):
keyword[try] :
identifier[result] = identifier[obj] . identifier[_values] . identifier[_from_sequence] ( identifier[result] , identifier[dtype] = identifier[dtype] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[elif] identifier[numeric_only] keyword[and] identifier[is_numeric_dtype] ( identifier[dtype] ) keyword[or] keyword[not] identifier[numeric_only] :
identifier[result] = identifier[maybe_downcast_to_dtype] ( identifier[result] , identifier[dtype] )
keyword[return] identifier[result] | def _try_cast(self, result, obj, numeric_only=False):
"""
Try to cast the result to our obj original type,
we may have roundtripped through object in the mean-time.
If numeric_only is True, then only try to cast numerics
and not datetimelikes.
"""
if obj.ndim > 1:
dtype = obj._values.dtype # depends on [control=['if'], data=[]]
else:
dtype = obj.dtype
if not is_scalar(result):
if is_datetime64tz_dtype(dtype):
# GH 23683
# Prior results _may_ have been generated in UTC.
# Ensure we localize to UTC first before converting
# to the target timezone
try:
result = obj._values._from_sequence(result, dtype='datetime64[ns, UTC]')
result = result.astype(dtype) # depends on [control=['try'], data=[]]
except TypeError:
# _try_cast was called at a point where the result
# was already tz-aware
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif is_extension_array_dtype(dtype):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
result = obj._values._from_sequence(result, dtype=dtype) # depends on [control=['try'], data=[]]
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return result |
def getSubstituteType(self, elt, ps):
    '''if xsi:type does not match the instance type attr,
    check to see if it is a derived type substitution.
    DONT Return the element's type.
    Parameters:
        elt -- the DOM element being parsed
        ps -- the ParsedSoap object.
    Raises:
        EvaluateException -- declared type or substitute type not registered.
        TypeError -- substitute type is not derived from the declared type.
    '''
    pyclass = SchemaInstanceType.getTypeDefinition(*self.type)
    if pyclass is None:
        raise EvaluateException(
            'No Type registed for xsi:type=(%s, %s)' %
            (self.type[0], self.type[1]), ps.Backtrace(elt))

    # Resolve the element's xsi:type QName against the in-scope namespaces
    # and look up the registered substitute type class.
    typeName = _find_type(elt)
    prefix, typeName = SplitQName(typeName)
    uri = ps.GetElementNSdict(elt).get(prefix)
    subclass = SchemaInstanceType.getTypeDefinition(uri, typeName)
    if subclass is None:
        raise EvaluateException(
            'No registered xsi:type=(%s, %s), substitute for xsi:type=(%s, %s)' %
            (uri, typeName, self.type[0], self.type[1]), ps.Backtrace(elt))

    # Bug fix: the original condition repeated the issubclass() test
    # ('A and B and A'); since issubclass is side-effect free, that is
    # equivalent to 'A and B', so the duplicate clause is dropped.
    # NOTE(review): instantiating subclass(None) as a truthiness guard looks
    # suspicious -- confirm it is intentional before simplifying further.
    if not issubclass(subclass, pyclass) and subclass(None):
        raise TypeError(
            'Substitute Type (%s, %s) is not derived from %s' %
            (self.type[0], self.type[1], pyclass), ps.Backtrace(elt))

    return subclass((self.nspname, self.pname))
constant[if xsi:type does not match the instance type attr,
check to see if it is a derived type substitution.
DONT Return the element's type.
Parameters:
elt -- the DOM element being parsed
ps -- the ParsedSoap object.
]
variable[pyclass] assign[=] call[name[SchemaInstanceType].getTypeDefinition, parameter[<ast.Starred object at 0x7da20c76f670>]]
if compare[name[pyclass] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c76d360>
variable[typeName] assign[=] call[name[_find_type], parameter[name[elt]]]
<ast.Tuple object at 0x7da20c76d690> assign[=] call[name[SplitQName], parameter[name[typeName]]]
variable[uri] assign[=] call[call[name[ps].GetElementNSdict, parameter[name[elt]]].get, parameter[name[prefix]]]
variable[subclass] assign[=] call[name[SchemaInstanceType].getTypeDefinition, parameter[name[uri], name[typeName]]]
if compare[name[subclass] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c76e6e0>
if <ast.BoolOp object at 0x7da20c76c070> begin[:]
<ast.Raise object at 0x7da20c6c7460>
return[call[name[subclass], parameter[tuple[[<ast.Attribute object at 0x7da2047eb2b0>, <ast.Attribute object at 0x7da2047e9d20>]]]]] | keyword[def] identifier[getSubstituteType] ( identifier[self] , identifier[elt] , identifier[ps] ):
literal[string]
identifier[pyclass] = identifier[SchemaInstanceType] . identifier[getTypeDefinition] (* identifier[self] . identifier[type] )
keyword[if] identifier[pyclass] keyword[is] keyword[None] :
keyword[raise] identifier[EvaluateException] (
literal[string] %
( identifier[self] . identifier[type] [ literal[int] ], identifier[self] . identifier[type] [ literal[int] ]), identifier[ps] . identifier[Backtrace] ( identifier[elt] ))
identifier[typeName] = identifier[_find_type] ( identifier[elt] )
identifier[prefix] , identifier[typeName] = identifier[SplitQName] ( identifier[typeName] )
identifier[uri] = identifier[ps] . identifier[GetElementNSdict] ( identifier[elt] ). identifier[get] ( identifier[prefix] )
identifier[subclass] = identifier[SchemaInstanceType] . identifier[getTypeDefinition] ( identifier[uri] , identifier[typeName] )
keyword[if] identifier[subclass] keyword[is] keyword[None] :
keyword[raise] identifier[EvaluateException] (
literal[string] %
( identifier[uri] , identifier[typeName] , identifier[self] . identifier[type] [ literal[int] ], identifier[self] . identifier[type] [ literal[int] ]), identifier[ps] . identifier[Backtrace] ( identifier[elt] ))
keyword[if] keyword[not] identifier[issubclass] ( identifier[subclass] , identifier[pyclass] ) keyword[and] identifier[subclass] ( keyword[None] ) keyword[and] keyword[not] identifier[issubclass] ( identifier[subclass] , identifier[pyclass] ):
keyword[raise] identifier[TypeError] (
literal[string] %
( identifier[self] . identifier[type] [ literal[int] ], identifier[self] . identifier[type] [ literal[int] ], identifier[pyclass] ), identifier[ps] . identifier[Backtrace] ( identifier[elt] ))
keyword[return] identifier[subclass] (( identifier[self] . identifier[nspname] , identifier[self] . identifier[pname] )) | def getSubstituteType(self, elt, ps):
"""if xsi:type does not match the instance type attr,
check to see if it is a derived type substitution.
DONT Return the element's type.
Parameters:
elt -- the DOM element being parsed
ps -- the ParsedSoap object.
"""
pyclass = SchemaInstanceType.getTypeDefinition(*self.type)
if pyclass is None:
raise EvaluateException('No Type registed for xsi:type=(%s, %s)' % (self.type[0], self.type[1]), ps.Backtrace(elt)) # depends on [control=['if'], data=[]]
typeName = _find_type(elt)
(prefix, typeName) = SplitQName(typeName)
uri = ps.GetElementNSdict(elt).get(prefix)
subclass = SchemaInstanceType.getTypeDefinition(uri, typeName)
if subclass is None:
raise EvaluateException('No registered xsi:type=(%s, %s), substitute for xsi:type=(%s, %s)' % (uri, typeName, self.type[0], self.type[1]), ps.Backtrace(elt)) # depends on [control=['if'], data=[]]
if not issubclass(subclass, pyclass) and subclass(None) and (not issubclass(subclass, pyclass)):
raise TypeError('Substitute Type (%s, %s) is not derived from %s' % (self.type[0], self.type[1], pyclass), ps.Backtrace(elt)) # depends on [control=['if'], data=[]]
return subclass((self.nspname, self.pname)) |
def config_dir(self):
    """
    Returns the configuration directory
    """
    # The jackal configuration lives in a dot-directory under the user's home.
    return os.path.join(expanduser('~'), '.jackal')
constant[
Returns the configuration directory
]
variable[home] assign[=] call[name[expanduser], parameter[constant[~]]]
variable[config_dir] assign[=] call[name[os].path.join, parameter[name[home], constant[.jackal]]]
return[name[config_dir]] | keyword[def] identifier[config_dir] ( identifier[self] ):
literal[string]
identifier[home] = identifier[expanduser] ( literal[string] )
identifier[config_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[home] , literal[string] )
keyword[return] identifier[config_dir] | def config_dir(self):
"""
Returns the configuration directory
"""
home = expanduser('~')
config_dir = os.path.join(home, '.jackal')
return config_dir |
def uuid4(self):
    """Make an id in the format of UUID4, but keep in mind this could very well be pseudorandom, and if it is you'll not be truely random, and can regenerate same id if same seed"""
    def hex_run(length):
        # Draw `length` pseudorandom hex digits from this generator's randint.
        return ''.join(hexchars[self.randint(0, 15)] for _ in range(length))
    # Segments are built left to right, so the sequence of randint draws is
    # identical to the original expression.
    return '-'.join([
        hex_run(8),
        hex_run(4),
        '4' + hex_run(3),                               # version-4 nibble
        uuid4special[self.randint(0, 3)] + hex_run(3),  # RFC 4122 variant nibble
        hex_run(12),
    ])
constant[Make an id in the format of UUID4, but keep in mind this could very well be pseudorandom, and if it is you'll not be truely random, and can regenerate same id if same seed]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[constant[].join, parameter[<ast.ListComp object at 0x7da2054a5480>]] + constant[-]] + call[constant[].join, parameter[<ast.ListComp object at 0x7da2054a72b0>]]] + constant[-]] + constant[4]] + call[constant[].join, parameter[<ast.ListComp object at 0x7da2054a5a20>]]] + constant[-]] + call[name[uuid4special]][call[name[self].randint, parameter[constant[0], constant[3]]]]] + call[constant[].join, parameter[<ast.ListComp object at 0x7da20c6a9bd0>]]] + constant[-]] + call[constant[].join, parameter[<ast.ListComp object at 0x7da20c6a88b0>]]]] | keyword[def] identifier[uuid4] ( identifier[self] ):
literal[string]
keyword[return] literal[string] . identifier[join] ([ identifier[hexchars] [ identifier[self] . identifier[randint] ( literal[int] , literal[int] )] keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )])+ literal[string] + literal[string] . identifier[join] ([ identifier[hexchars] [ identifier[self] . identifier[randint] ( literal[int] , literal[int] )] keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )])+ literal[string] + literal[string] + literal[string] . identifier[join] ([ identifier[hexchars] [ identifier[self] . identifier[randint] ( literal[int] , literal[int] )] keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )])+ literal[string] + identifier[uuid4special] [ identifier[self] . identifier[randint] ( literal[int] , literal[int] )]+ literal[string] . identifier[join] ([ identifier[hexchars] [ identifier[self] . identifier[randint] ( literal[int] , literal[int] )] keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )])+ literal[string] + literal[string] . identifier[join] ([ identifier[hexchars] [ identifier[self] . identifier[randint] ( literal[int] , literal[int] )] keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )]) | def uuid4(self):
"""Make an id in the format of UUID4, but keep in mind this could very well be pseudorandom, and if it is you'll not be truely random, and can regenerate same id if same seed"""
return ''.join([hexchars[self.randint(0, 15)] for x in range(0, 8)]) + '-' + ''.join([hexchars[self.randint(0, 15)] for x in range(0, 4)]) + '-' + '4' + ''.join([hexchars[self.randint(0, 15)] for x in range(0, 3)]) + '-' + uuid4special[self.randint(0, 3)] + ''.join([hexchars[self.randint(0, 15)] for x in range(0, 3)]) + '-' + ''.join([hexchars[self.randint(0, 15)] for x in range(0, 12)]) |
def compare(left, right, left_label='left', right_label='right',
            drop_close=True, **kwargs):
    """Compare the data in two IamDataFrames and return a pd.DataFrame
    Parameters
    ----------
    left, right: IamDataFrames
        the IamDataFrames to be compared
    left_label, right_label: str, default `left`, `right`
        column names of the returned dataframe
    drop_close: bool, default True
        remove all data where `left` and `right` are close
    kwargs: passed to `np.isclose()`
    """
    # Align both datasets on the long-format index and place them side by side.
    frames = {
        right_label: right.data.set_index(right._LONG_IDX),
        left_label: left.data.set_index(left._LONG_IDX),
    }
    merged = pd.concat(frames, axis=1)
    # Collapse the (label, value-column) MultiIndex down to the labels.
    merged.columns = merged.columns.droplevel(1)
    if drop_close:
        close_mask = np.isclose(merged[left_label], merged[right_label], **kwargs)
        merged = merged[~close_mask]
    return merged[[right_label, left_label]]
constant[Compare the data in two IamDataFrames and return a pd.DataFrame
Parameters
----------
left, right: IamDataFrames
the IamDataFrames to be compared
left_label, right_label: str, default `left`, `right`
column names of the returned dataframe
drop_close: bool, default True
remove all data where `left` and `right` are close
kwargs: passed to `np.isclose()`
]
variable[ret] assign[=] call[name[pd].concat, parameter[dictionary[[<ast.Name object at 0x7da18dc9bbe0>, <ast.Name object at 0x7da18dc98160>], [<ast.Call object at 0x7da18dc9b340>, <ast.Call object at 0x7da18dc9a3e0>]]]]
name[ret].columns assign[=] call[name[ret].columns.droplevel, parameter[constant[1]]]
if name[drop_close] begin[:]
variable[ret] assign[=] call[name[ret]][<ast.UnaryOp object at 0x7da18dc9a5c0>]
return[call[name[ret]][list[[<ast.Name object at 0x7da18dc9be80>, <ast.Name object at 0x7da18dc9a530>]]]] | keyword[def] identifier[compare] ( identifier[left] , identifier[right] , identifier[left_label] = literal[string] , identifier[right_label] = literal[string] ,
identifier[drop_close] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[ret] = identifier[pd] . identifier[concat] ({ identifier[right_label] : identifier[right] . identifier[data] . identifier[set_index] ( identifier[right] . identifier[_LONG_IDX] ),
identifier[left_label] : identifier[left] . identifier[data] . identifier[set_index] ( identifier[left] . identifier[_LONG_IDX] )}, identifier[axis] = literal[int] )
identifier[ret] . identifier[columns] = identifier[ret] . identifier[columns] . identifier[droplevel] ( literal[int] )
keyword[if] identifier[drop_close] :
identifier[ret] = identifier[ret] [~ identifier[np] . identifier[isclose] ( identifier[ret] [ identifier[left_label] ], identifier[ret] [ identifier[right_label] ],** identifier[kwargs] )]
keyword[return] identifier[ret] [[ identifier[right_label] , identifier[left_label] ]] | def compare(left, right, left_label='left', right_label='right', drop_close=True, **kwargs):
"""Compare the data in two IamDataFrames and return a pd.DataFrame
Parameters
----------
left, right: IamDataFrames
the IamDataFrames to be compared
left_label, right_label: str, default `left`, `right`
column names of the returned dataframe
drop_close: bool, default True
remove all data where `left` and `right` are close
kwargs: passed to `np.isclose()`
"""
ret = pd.concat({right_label: right.data.set_index(right._LONG_IDX), left_label: left.data.set_index(left._LONG_IDX)}, axis=1)
ret.columns = ret.columns.droplevel(1)
if drop_close:
ret = ret[~np.isclose(ret[left_label], ret[right_label], **kwargs)] # depends on [control=['if'], data=[]]
return ret[[right_label, left_label]] |
def map_to_regions(regions, values):
    r"""
    Maps pore values from a network onto the image from which it was extracted
    This function assumes that the pore numbering in the network has remained
    unchanged from the region labels in the partitioned image.
    Parameters
    ----------
    regions : ND-array
        An image of the pore space partitioned into regions and labeled
    values : array_like
        An array containing the numerical values to insert into each region.
        The value at location *n* will be inserted into the image where
        ``regions`` is *n+1*. This mis-match is caused by the fact that 0's
        in the ``regions`` image is assumed to be the backgroung phase, while
        pore index 0 is valid.
    Notes
    -----
    This function assumes that the array of pore values are indexed starting
    at location 0, while in the region image 0's indicate background phase and
    the region indexing starts at 1. That is, region 1 corresponds to pore 0.
    """
    # Local import: the original used scipy's numpy aliases (sp.array,
    # sp.size, sp.zeros_like), which have been removed from modern SciPy.
    import numpy as np

    values = np.asarray(values).flatten()
    if values.size != regions.max() + 1:
        raise Exception('Number of values does not match number of regions')
    # Fancy indexing: each region label n selects values[n]; the background
    # label 0 therefore maps to values[0].
    # (The original also allocated a zeros_like buffer that was immediately
    # overwritten -- that dead store is removed.)
    return values[regions]
constant[
Maps pore values from a network onto the image from which it was extracted
This function assumes that the pore numbering in the network has remained
unchanged from the region labels in the partitioned image.
Parameters
----------
regions : ND-array
An image of the pore space partitioned into regions and labeled
values : array_like
An array containing the numerical values to insert into each region.
The value at location *n* will be inserted into the image where
``regions`` is *n+1*. This mis-match is caused by the fact that 0's
in the ``regions`` image is assumed to be the backgroung phase, while
pore index 0 is valid.
Notes
-----
This function assumes that the array of pore values are indexed starting
at location 0, while in the region image 0's indicate background phase and
the region indexing starts at 1. That is, region 1 corresponds to pore 0.
]
variable[values] assign[=] call[call[name[sp].array, parameter[name[values]]].flatten, parameter[]]
if compare[call[name[sp].size, parameter[name[values]]] not_equal[!=] binary_operation[call[name[regions].max, parameter[]] + constant[1]]] begin[:]
<ast.Raise object at 0x7da1b0562350>
variable[im] assign[=] call[name[sp].zeros_like, parameter[name[regions]]]
variable[im] assign[=] call[name[values]][name[regions]]
return[name[im]] | keyword[def] identifier[map_to_regions] ( identifier[regions] , identifier[values] ):
literal[string]
identifier[values] = identifier[sp] . identifier[array] ( identifier[values] ). identifier[flatten] ()
keyword[if] identifier[sp] . identifier[size] ( identifier[values] )!= identifier[regions] . identifier[max] ()+ literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[im] = identifier[sp] . identifier[zeros_like] ( identifier[regions] )
identifier[im] = identifier[values] [ identifier[regions] ]
keyword[return] identifier[im] | def map_to_regions(regions, values):
"""
Maps pore values from a network onto the image from which it was extracted
This function assumes that the pore numbering in the network has remained
unchanged from the region labels in the partitioned image.
Parameters
----------
regions : ND-array
An image of the pore space partitioned into regions and labeled
values : array_like
An array containing the numerical values to insert into each region.
The value at location *n* will be inserted into the image where
``regions`` is *n+1*. This mis-match is caused by the fact that 0's
in the ``regions`` image is assumed to be the backgroung phase, while
pore index 0 is valid.
Notes
-----
This function assumes that the array of pore values are indexed starting
at location 0, while in the region image 0's indicate background phase and
the region indexing starts at 1. That is, region 1 corresponds to pore 0.
"""
values = sp.array(values).flatten()
if sp.size(values) != regions.max() + 1:
raise Exception('Number of values does not match number of regions') # depends on [control=['if'], data=[]]
im = sp.zeros_like(regions)
im = values[regions]
return im |
def write_event(self, event):
    """Writes an event proto to disk, rotating the events file as needed.

    This method is threadsafe with respect to invocations of itself: an
    internal lock serializes concurrent callers.

    Args:
      event: The event proto.

    Note:
      IOErrors raised while writing or rotating are caught and logged
      rather than propagated to the caller.
    """
    self._lock.acquire()
    try:
      try:
        self._events_writer.WriteEvent(event)
        self._event_count += 1
        if self._always_flush:
          # We flush on every event within the integration test.
          self._events_writer.Flush()

        if self._event_count == self._check_this_often:
          # Every so often, we check whether the size of the file is too big.
          self._event_count = 0
          # Flush to get an accurate size check.
          self._events_writer.Flush()
          file_path = os.path.join(self._events_directory,
                                   self.get_current_file_name())
          if not tf.io.gfile.exists(file_path):
            # The events file does not exist. Perhaps the user had manually
            # deleted it after training began. Create a new one.
            self._events_writer.Close()
            self._events_writer = self._create_events_writer(
                self._events_directory)
          elif (tf.io.gfile.stat(file_path).length >
                self._single_file_size_cap_bytes):
            # The current events file has gotten too big. Close the previous
            # events writer. Make a new one.
            self._events_writer.Close()
            self._events_writer = self._create_events_writer(
                self._events_directory)
      except IOError as err:
        logger.error(
            "Writing to %s failed: %s", self.get_current_file_name(), err)
    finally:
      # BUGFIX: release the lock in a `finally` block. Previously the release
      # was a plain trailing statement, so any exception other than IOError
      # (e.g. from tf.io.gfile or the events writer) propagated with the lock
      # still held, deadlocking every subsequent write_event() call.
      self._lock.release()
constant[Writes an event proto to disk.
This method is threadsafe with respect to invocations of itself.
Args:
event: The event proto.
Raises:
IOError: If writing the event proto to disk fails.
]
call[name[self]._lock.acquire, parameter[]]
<ast.Try object at 0x7da1b21eb1f0>
call[name[self]._lock.release, parameter[]] | keyword[def] identifier[write_event] ( identifier[self] , identifier[event] ):
literal[string]
identifier[self] . identifier[_lock] . identifier[acquire] ()
keyword[try] :
identifier[self] . identifier[_events_writer] . identifier[WriteEvent] ( identifier[event] )
identifier[self] . identifier[_event_count] += literal[int]
keyword[if] identifier[self] . identifier[_always_flush] :
identifier[self] . identifier[_events_writer] . identifier[Flush] ()
keyword[if] identifier[self] . identifier[_event_count] == identifier[self] . identifier[_check_this_often] :
identifier[self] . identifier[_event_count] = literal[int]
identifier[self] . identifier[_events_writer] . identifier[Flush] ()
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_events_directory] ,
identifier[self] . identifier[get_current_file_name] ())
keyword[if] keyword[not] identifier[tf] . identifier[io] . identifier[gfile] . identifier[exists] ( identifier[file_path] ):
identifier[self] . identifier[_events_writer] . identifier[Close] ()
identifier[self] . identifier[_events_writer] = identifier[self] . identifier[_create_events_writer] (
identifier[self] . identifier[_events_directory] )
keyword[elif] identifier[tf] . identifier[io] . identifier[gfile] . identifier[stat] ( identifier[file_path] ). identifier[length] > identifier[self] . identifier[_single_file_size_cap_bytes] :
identifier[self] . identifier[_events_writer] . identifier[Close] ()
identifier[self] . identifier[_events_writer] = identifier[self] . identifier[_create_events_writer] (
identifier[self] . identifier[_events_directory] )
keyword[except] identifier[IOError] keyword[as] identifier[err] :
identifier[logger] . identifier[error] (
literal[string] , identifier[self] . identifier[get_current_file_name] (), identifier[err] )
identifier[self] . identifier[_lock] . identifier[release] () | def write_event(self, event):
"""Writes an event proto to disk.
This method is threadsafe with respect to invocations of itself.
Args:
event: The event proto.
Raises:
IOError: If writing the event proto to disk fails.
"""
self._lock.acquire()
try:
self._events_writer.WriteEvent(event)
self._event_count += 1
if self._always_flush:
# We flush on every event within the integration test.
self._events_writer.Flush() # depends on [control=['if'], data=[]]
if self._event_count == self._check_this_often:
# Every so often, we check whether the size of the file is too big.
self._event_count = 0
# Flush to get an accurate size check.
self._events_writer.Flush()
file_path = os.path.join(self._events_directory, self.get_current_file_name())
if not tf.io.gfile.exists(file_path):
# The events file does not exist. Perhaps the user had manually
# deleted it after training began. Create a new one.
self._events_writer.Close()
self._events_writer = self._create_events_writer(self._events_directory) # depends on [control=['if'], data=[]]
elif tf.io.gfile.stat(file_path).length > self._single_file_size_cap_bytes:
# The current events file has gotten too big. Close the previous
# events writer. Make a new one.
self._events_writer.Close()
self._events_writer = self._create_events_writer(self._events_directory) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IOError as err:
logger.error('Writing to %s failed: %s', self.get_current_file_name(), err) # depends on [control=['except'], data=['err']]
self._lock.release() |
def _getLayers(self):
    """Populate ``self._layers`` with the FeatureLayer objects advertised
    by this feature service's JSON description."""
    # Request the service's JSON description.
    json_dict = self._get(self._url, {"f": "json"},
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    # Start from an empty cache; a response without a 'layers' key leaves
    # it empty, matching the previous behavior.
    self._layers = []
    for lyr in json_dict.get('layers', []):
        # Each advertised layer id becomes a FeatureLayer rooted at
        # <service url>/<id>.
        self._layers.append(
            layer.FeatureLayer(url=self._url + "/%s" % lyr['id'],
                               securityHandler=self._securityHandler,
                               proxy_port=self._proxy_port,
                               proxy_url=self._proxy_url)
        )
) | def function[_getLayers, parameter[self]]:
constant[ gets layers for the featuer service ]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b12284c0>], [<ast.Constant object at 0x7da1b1228790>]]
variable[json_dict] assign[=] call[name[self]._get, parameter[name[self]._url, name[params]]]
name[self]._layers assign[=] list[[]]
if compare[constant[layers] in name[json_dict]] begin[:]
for taget[name[l]] in starred[call[name[json_dict]][constant[layers]]] begin[:]
call[name[self]._layers.append, parameter[call[name[layer].FeatureLayer, parameter[]]]] | keyword[def] identifier[_getLayers] ( identifier[self] ):
literal[string]
identifier[params] ={ literal[string] : literal[string] }
identifier[json_dict] = identifier[self] . identifier[_get] ( identifier[self] . identifier[_url] , identifier[params] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] )
identifier[self] . identifier[_layers] =[]
keyword[if] literal[string] keyword[in] identifier[json_dict] :
keyword[for] identifier[l] keyword[in] identifier[json_dict] [ literal[string] ]:
identifier[self] . identifier[_layers] . identifier[append] (
identifier[layer] . identifier[FeatureLayer] ( identifier[url] = identifier[self] . identifier[_url] + literal[string] % identifier[l] [ literal[string] ],
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] )
) | def _getLayers(self):
""" gets layers for the featuer service """
params = {'f': 'json'}
json_dict = self._get(self._url, params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
self._layers = []
if 'layers' in json_dict:
for l in json_dict['layers']:
self._layers.append(layer.FeatureLayer(url=self._url + '/%s' % l['id'], securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url)) # depends on [control=['for'], data=['l']] # depends on [control=['if'], data=['json_dict']] |
def data_discovery(self, region, keywords=None, regex=None, time=None,
                   boundaries=None, include_quantiles=False):
        """Discover Data Observatory measures. This method returns the full
        Data Observatory metadata model for each measure or measures that
        match the conditions from the inputs. The full metadata in each row
        uniquely defines a measure based on the timespan, geographic
        resolution, and normalization (if any). Read more about the metadata
        response in `Data Observatory
        <https://carto.com/docs/carto-engine/data/measures-functions/#obs_getmetaextent-geometry-metadata-json-max_timespan_rank-max_score_rank-target_geoms>`__
        documentation.
        Internally, this method finds all measures in `region` that match the
        conditions set in `keywords`, `regex`, `time`, and `boundaries` (if
        any of them are specified). Then, if `boundaries` is not specified, a
        geographical resolution for that measure will be chosen subject to the
        type of region specified:
        1. If `region` is a table name, then a geographical resolution that
           is roughly equal to `region size / number of subunits`.
        2. If `region` is a country name or bounding box, then a geographical
           resolution will be chosen roughly equal to `region size / 500`.
        Since different measures are in some geographic resolutions and not
        others, different geographical resolutions for different measures are
        oftentimes returned.
        .. tip::
            To remove the guesswork in how geographical resolutions are
            selected, specify one or more boundaries in `boundaries`. See
            the boundaries section for each region in the `Data Observatory
            catalog <http://cartodb.github.io/bigmetadata/>`__.
        The metadata returned from this method can then be used to create raw
        tables or for augmenting an existing table from these measures using
        :py:meth:`CartoContext.data <cartoframes.context.CartoContext.data>`.
        For the full Data Observatory catalog, visit
        https://cartodb.github.io/bigmetadata/. When working with the metadata
        DataFrame returned from this method, be careful to only remove rows not
        columns as `CartoContext.data <cartoframes.context.CartoContext.data>`
        generally needs the full metadata.
        .. note::
            Narrowing down a discovery query using the `keywords`, `regex`, and
            `time` filters is important for getting a manageable metadata
            set. Besides there being a large number of measures in the DO, a
            metadata response has acceptable combinations of measures with
            demonimators (normalization and density), and the same measure from
            other years.
            For example, setting the region to be United States counties with
            no filter values set will result in many thousands of measures.
        Examples:
            Get all European Union measures that mention ``freight``.
            .. code::
                meta = cc.data_discovery('European Union',
                                         keywords='freight',
                                         time='2010')
                print(meta['numer_name'].values)
        Arguments:
            region (str or list of float): Information about the region of
                interest. `region` can be one of three types:
                - region name (str): Name of region of interest. Acceptable
                  values are limited to: 'Australia', 'Brazil', 'Canada',
                  'European Union', 'France', 'Mexico', 'Spain',
                  'United Kingdom', 'United States'.
                - table name (str): Name of a table in user's CARTO account
                  with geometries. The region will be the bounding box of
                  the table.
                  .. Note:: If a table name is also a valid Data Observatory
                      region name, the Data Observatory name will be chosen
                      over the table.
                - bounding box (list of float): List of four values (two
                  lng/lat pairs) in the following order: western longitude,
                  southern latitude, eastern longitude, and northern latitude.
                  For example, Switzerland fits in
                  ``[5.9559111595,45.8179931641,10.4920501709,47.808380127]``
                .. Note:: Geometry levels are generally chosen by subdividing
                  the region into the next smallest administrative unit. To
                  override this behavior, specify the `boundaries` flag. For
                  example, set `boundaries` to
                  ``'us.census.tiger.census_tract'`` to choose US census
                  tracts.
            keywords (str or list of str, optional): Keyword or list of
                keywords in measure description or name. Response will be matched
                on all keywords listed (boolean `or`).
            regex (str, optional): A regular expression to search the measure
                descriptions and names. Note that this relies on PostgreSQL's
                case insensitive operator ``~*``. See `PostgreSQL docs
                <https://www.postgresql.org/docs/9.5/static/functions-matching.html>`__
                for more information.
            time (str or list of str, optional): Timespan or list of timespans
                to filter measures by (e.g., ``'2010'`` as in the example
                above). Defaults to ``None``, which applies no timespan filter.
            boundaries (str or list of str, optional): Boundary or list of
                boundaries that specify the measure resolution. See the
                boundaries section for each region in the `Data Observatory
                catalog <http://cartodb.github.io/bigmetadata/>`__.
            include_quantiles (bool, optional): Include quantiles calculations
                which are a calculation of how a measure compares to all measures
                in the full dataset. Defaults to ``False``. If ``True``,
                quantiles columns will be returned for each column which has it
                pre-calculated.
        Returns:
            pandas.DataFrame: A dataframe of the complete metadata model for
            specific measures based on the search parameters.
        Raises:
            ValueError: If `region` is a :obj:`list` and does not consist of
                four elements, or if `region` is not an acceptable region
            CartoException: If `region` is not a table in user account
        """
        # --- Resolve `region` into a bounding-box SQL snippet (`boundary`)
        # and, for known DO regions, a `countrytag` for the numerator search.
        if isinstance(region, str):
            try:
                # see if it's a DO region, nest in {}
                countrytag = '\'{{{0}}}\''.format(
                    get_countrytag(region))
                # Known DO region: search the whole-world extent, targeting
                # roughly 500 geometries.
                boundary = ('SELECT ST_MakeEnvelope(-180.0, -85.0, 180.0, '
                            '85.0, 4326) AS env, 500::int AS cnt')
            except ValueError:
                # TODO: make this work for general queries
                # see if it's a table
                # Not a DO region name: probe `region` as a user table (per
                # the docstring, a CartoException surfaces if it is not one)
                # and use the table's extent and row count as the envelope.
                self.sql_client.send(
                    'EXPLAIN SELECT * FROM {}'.format(region))
                boundary = (
                    'SELECT ST_SetSRID(ST_Extent(the_geom), 4326) AS env, '
                    'count(*)::int AS cnt FROM {table_name}').format(
                        table_name=region)
        # NOTE(review): `collections.Iterable` was removed from `collections`
        # in Python 3.10 (`collections.abc.Iterable` is the modern spelling)
        # -- flagged only, not changed here.
        elif isinstance(region, collections.Iterable):
            if len(region) != 4:
                # NOTE(review): 'Switerland' typo is in a user-facing runtime
                # string, so it is left as-is here.
                raise ValueError(
                    '`region` should be a list of the geographic bounds of a '
                    'region in the following order: western longitude, '
                    'southern latitude, eastern longitude, and northern '
                    'latitude. For example, Switerland fits in '
                    '``[5.9559111595,45.8179931641,10.4920501709,'
                    '47.808380127]``.'
                )
            # Explicit bounding box: use it directly, targeting ~500 geoms.
            boundary = ('SELECT ST_MakeEnvelope({0}, {1}, {2}, {3}, 4326) AS '
                        'env, 500::int AS cnt'.format(*region))
        # NOTE(review): if `region` is neither a str nor an Iterable (e.g. an
        # int), `boundary` is never bound and the `.format(boundary=...)` at
        # the end raises NameError -- confirm whether an explicit ValueError
        # is wanted instead.

        # HACK: `countrytag` is only bound in the DO-region branch above;
        # `locals()` is used to detect the unbound case without a NameError.
        if locals().get('countrytag') is None:
            countrytag = 'null'

        # Build a case-insensitive ILIKE clause that matches any keyword
        # against measure names/descriptions (keywords are OR'd together).
        if keywords:
            if isinstance(keywords, str):
                keywords = [keywords, ]
            # NOTE(review): keywords are interpolated into the SQL pattern
            # without escaping, so a keyword containing a single quote will
            # break (or alter) the generated query -- flagged only.
            kwsearch = ' OR '.join(
                ('numer_description ILIKE \'%{kw}%\' OR '
                 'numer_name ILIKE \'%{kw}%\'').format(kw=kw)
                for kw in keywords)
            kwsearch = '({})'.format(kwsearch)

        # Regex filter against the same columns; `utils.pgquote` SQL-quotes
        # the pattern.
        if regex:
            regexsearch = ('(numer_description ~* {regex} OR numer_name '
                           '~* {regex})').format(regex=utils.pgquote(regex))

        # Combine the keyword and regex clauses (OR'd when both are present).
        if keywords or regex:
            subjectfilters = '{kw} {op} {regex}'.format(
                kw=kwsearch if keywords else '',
                op='OR' if (keywords and regex) else '',
                regex=regexsearch if regex else '').strip()
        else:
            subjectfilters = ''

        # Normalize scalar (or None) `time`/`boundaries` into one-element
        # lists so the product loop below runs once per combination.
        if isinstance(time, str) or time is None:
            time = [time, ]
        if isinstance(boundaries, str) or boundaries is None:
            boundaries = [boundaries, ]

        # `all([None])` is falsy, so an unset filter simply drops its
        # validity condition from the numerator search.
        if all(time) and all(boundaries):
            bt_filters = 'valid_geom AND valid_timespan'
        elif all(time) or all(boundaries):
            bt_filters = 'valid_geom' if all(boundaries) else 'valid_timespan'
        else:
            bt_filters = ''

        # Assemble the final WHERE clause from whichever filters are active.
        if bt_filters and subjectfilters:
            filters = 'WHERE ({s}) AND ({bt})'.format(
                s=subjectfilters, bt=bt_filters)
        elif bt_filters or subjectfilters:
            filters = 'WHERE {f}'.format(f=subjectfilters or bt_filters)
        else:
            filters = ''

        # Unless requested, exclude pre-computed quantile measures from the
        # metadata response.
        quantiles = ('WHERE numer_aggregate <> \'quantile\''
                     if not include_quantiles else '')

        # Template for one OBS_GetAvailableNumerators call; the placeholders
        # are filled per (timespan, boundary, normalization) combination.
        numer_query = utils.minify_sql((
            'SELECT',
            '    numer_id,',
            '    {geom_id} AS geom_id,',
            '    {timespan} AS numer_timespan,',
            '    {normalization} AS normalization',
            '  FROM',
            '    OBS_GetAvailableNumerators(',
            '        (SELECT env FROM envelope),',
            '        {countrytag},',
            '        null,',  # denom_id
            '        {geom_id},',
            '        {timespan})',
            '{filters}', )).strip()

        # query all numerators for all `time`, `boundaries`, and raw/derived
        numers = '\nUNION\n'.join(
            numer_query.format(
                timespan=utils.pgquote(t),
                geom_id=utils.pgquote(b),
                normalization=utils.pgquote(n),
                countrytag=countrytag,
                filters=filters)
            for t in time
            for b in boundaries
            for n in ('predenominated', None))

        # Wrap the numerator search in OBS_GetMeta and unpack the returned
        # JSON into one typed metadata row per measure.
        query = utils.minify_sql((
            'WITH envelope AS (',
            '    {boundary}',
            '), numers AS (',
            '  {numers}',
            ')',
            'SELECT *',
            'FROM json_to_recordset(',
            '    (SELECT OBS_GetMeta(',
            '        envelope.env,',
            '        json_agg(numers),',
            '        10, 10, envelope.cnt',
            '    ) AS meta',
            'FROM numers, envelope',
            'GROUP BY env, cnt)) as data(',
            '    denom_aggregate text, denom_colname text,',
            '    denom_description text, denom_geomref_colname text,',
            '    denom_id text, denom_name text, denom_reltype text,',
            '    denom_t_description text, denom_tablename text,',
            '    denom_type text, geom_colname text, geom_description text,',
            '    geom_geomref_colname text, geom_id text, geom_name text,',
            '    geom_t_description text, geom_tablename text,',
            '    geom_timespan text, geom_type text, id numeric,',
            '    max_score_rank text, max_timespan_rank text,',
            '    normalization text, num_geoms numeric, numer_aggregate text,',
            '    numer_colname text, numer_description text,',
            '    numer_geomref_colname text, numer_id text,',
            '    numer_name text, numer_t_description text,',
            '    numer_tablename text, numer_timespan text,',
            '    numer_type text, score numeric, score_rank numeric,',
            '    score_rownum numeric, suggested_name text,',
            '    target_area text, target_geoms text, timespan_rank numeric,',
            '    timespan_rownum numeric)',
            '{quantiles}', )).format(
                boundary=boundary,
                numers=numers,
                quantiles=quantiles).strip()
        self._debug_print(query=query)
        return self.fetch(query, decode_geom=True)
constant[Discover Data Observatory measures. This method returns the full
Data Observatory metadata model for each measure or measures that
match the conditions from the inputs. The full metadata in each row
uniquely defines a measure based on the timespan, geographic
resolution, and normalization (if any). Read more about the metadata
response in `Data Observatory
<https://carto.com/docs/carto-engine/data/measures-functions/#obs_getmetaextent-geometry-metadata-json-max_timespan_rank-max_score_rank-target_geoms>`__
documentation.
Internally, this method finds all measures in `region` that match the
conditions set in `keywords`, `regex`, `time`, and `boundaries` (if
any of them are specified). Then, if `boundaries` is not specified, a
geographical resolution for that measure will be chosen subject to the
type of region specified:
1. If `region` is a table name, then a geographical resolution that
is roughly equal to `region size / number of subunits`.
2. If `region` is a country name or bounding box, then a geographical
resolution will be chosen roughly equal to `region size / 500`.
Since different measures are in some geographic resolutions and not
others, different geographical resolutions for different measures are
oftentimes returned.
.. tip::
To remove the guesswork in how geographical resolutions are
selected, specify one or more boundaries in `boundaries`. See
the boundaries section for each region in the `Data Observatory
catalog <http://cartodb.github.io/bigmetadata/>`__.
The metadata returned from this method can then be used to create raw
tables or for augmenting an existing table from these measures using
:py:meth:`CartoContext.data <cartoframes.context.CartoContext.data>`.
For the full Data Observatory catalog, visit
https://cartodb.github.io/bigmetadata/. When working with the metadata
DataFrame returned from this method, be careful to only remove rows not
columns as `CartoContext.data <cartoframes.context.CartoContext.data>`
generally needs the full metadata.
.. note::
Narrowing down a discovery query using the `keywords`, `regex`, and
`time` filters is important for getting a manageable metadata
set. Besides there being a large number of measures in the DO, a
metadata response has acceptable combinations of measures with
demonimators (normalization and density), and the same measure from
other years.
For example, setting the region to be United States counties with
no filter values set will result in many thousands of measures.
Examples:
Get all European Union measures that mention ``freight``.
.. code::
meta = cc.data_discovery('European Union',
keywords='freight',
time='2010')
print(meta['numer_name'].values)
Arguments:
region (str or list of float): Information about the region of
interest. `region` can be one of three types:
- region name (str): Name of region of interest. Acceptable
values are limited to: 'Australia', 'Brazil', 'Canada',
'European Union', 'France', 'Mexico', 'Spain',
'United Kingdom', 'United States'.
- table name (str): Name of a table in user's CARTO account
with geometries. The region will be the bounding box of
the table.
.. Note:: If a table name is also a valid Data Observatory
region name, the Data Observatory name will be chosen
over the table.
- bounding box (list of float): List of four values (two
lng/lat pairs) in the following order: western longitude,
southern latitude, eastern longitude, and northern latitude.
For example, Switzerland fits in
``[5.9559111595,45.8179931641,10.4920501709,47.808380127]``
.. Note:: Geometry levels are generally chosen by subdividing
the region into the next smallest administrative unit. To
override this behavior, specify the `boundaries` flag. For
example, set `boundaries` to
``'us.census.tiger.census_tract'`` to choose US census
tracts.
keywords (str or list of str, optional): Keyword or list of
keywords in measure description or name. Response will be matched
on all keywords listed (boolean `or`).
regex (str, optional): A regular expression to search the measure
descriptions and names. Note that this relies on PostgreSQL's
case insensitive operator ``~*``. See `PostgreSQL docs
<https://www.postgresql.org/docs/9.5/static/functions-matching.html>`__
for more information.
boundaries (str or list of str, optional): Boundary or list of
boundaries that specify the measure resolution. See the
boundaries section for each region in the `Data Observatory
catalog <http://cartodb.github.io/bigmetadata/>`__.
include_quantiles (bool, optional): Include quantiles calculations
which are a calculation of how a measure compares to all measures
in the full dataset. Defaults to ``False``. If ``True``,
quantiles columns will be returned for each column which has it
pre-calculated.
Returns:
pandas.DataFrame: A dataframe of the complete metadata model for
specific measures based on the search parameters.
Raises:
ValueError: If `region` is a :obj:`list` and does not consist of
four elements, or if `region` is not an acceptable region
CartoException: If `region` is not a table in user account
]
if call[name[isinstance], parameter[name[region], name[str]]] begin[:]
<ast.Try object at 0x7da2054a6800>
if compare[call[call[name[locals], parameter[]].get, parameter[constant[countrytag]]] is constant[None]] begin[:]
variable[countrytag] assign[=] constant[null]
if name[keywords] begin[:]
if call[name[isinstance], parameter[name[keywords], name[str]]] begin[:]
variable[keywords] assign[=] list[[<ast.Name object at 0x7da18eb553f0>]]
variable[kwsearch] assign[=] call[constant[ OR ].join, parameter[<ast.GeneratorExp object at 0x7da18eb55240>]]
variable[kwsearch] assign[=] call[constant[({})].format, parameter[name[kwsearch]]]
if name[regex] begin[:]
variable[regexsearch] assign[=] call[constant[(numer_description ~* {regex} OR numer_name ~* {regex})].format, parameter[]]
if <ast.BoolOp object at 0x7da18eb55d50> begin[:]
variable[subjectfilters] assign[=] call[call[constant[{kw} {op} {regex}].format, parameter[]].strip, parameter[]]
if <ast.BoolOp object at 0x7da18eb558d0> begin[:]
variable[time] assign[=] list[[<ast.Name object at 0x7da18eb55d20>]]
if <ast.BoolOp object at 0x7da18eb56320> begin[:]
variable[boundaries] assign[=] list[[<ast.Name object at 0x7da18eb57c40>]]
if <ast.BoolOp object at 0x7da18eb56440> begin[:]
variable[bt_filters] assign[=] constant[valid_geom AND valid_timespan]
if <ast.BoolOp object at 0x7da18eb55690> begin[:]
variable[filters] assign[=] call[constant[WHERE ({s}) AND ({bt})].format, parameter[]]
variable[quantiles] assign[=] <ast.IfExp object at 0x7da18eb56830>
variable[numer_query] assign[=] call[call[name[utils].minify_sql, parameter[tuple[[<ast.Constant object at 0x7da18eb54880>, <ast.Constant object at 0x7da18eb559f0>, <ast.Constant object at 0x7da18eb57460>, <ast.Constant object at 0x7da18eb57160>, <ast.Constant object at 0x7da18eb56d10>, <ast.Constant object at 0x7da18eb55390>, <ast.Constant object at 0x7da18eb54b80>, <ast.Constant object at 0x7da18dc98a90>, <ast.Constant object at 0x7da18dc9b4f0>, <ast.Constant object at 0x7da18dc9a560>, <ast.Constant object at 0x7da18dc989d0>, <ast.Constant object at 0x7da18dc9bfd0>, <ast.Constant object at 0x7da18dc9a290>]]]].strip, parameter[]]
variable[numers] assign[=] call[constant[
UNION
].join, parameter[<ast.GeneratorExp object at 0x7da18dc9bd00>]]
variable[query] assign[=] call[call[call[name[utils].minify_sql, parameter[tuple[[<ast.Constant object at 0x7da18dc9a740>, <ast.Constant object at 0x7da18dc98cd0>, <ast.Constant object at 0x7da18dc9aa40>, <ast.Constant object at 0x7da18dc993c0>, <ast.Constant object at 0x7da18dc9aa10>, <ast.Constant object at 0x7da18dc982b0>, <ast.Constant object at 0x7da18dc9a590>, <ast.Constant object at 0x7da18dc9b5e0>, <ast.Constant object at 0x7da18dc98460>, <ast.Constant object at 0x7da18dc9aaa0>, <ast.Constant object at 0x7da18dc9b0d0>, <ast.Constant object at 0x7da18dc9bac0>, <ast.Constant object at 0x7da18dc9b2b0>, <ast.Constant object at 0x7da18dc98c10>, <ast.Constant object at 0x7da18dc9b8b0>, <ast.Constant object at 0x7da18dc9a650>, <ast.Constant object at 0x7da18dc9a350>, <ast.Constant object at 0x7da18dc98be0>, <ast.Constant object at 0x7da18dc9b760>, <ast.Constant object at 0x7da18dc990c0>, <ast.Constant object at 0x7da18dc9ba00>, <ast.Constant object at 0x7da18dc998a0>, <ast.Constant object at 0x7da18dc98670>, <ast.Constant object at 0x7da18dc9af50>, <ast.Constant object at 0x7da18dc99180>, <ast.Constant object at 0x7da18dc9b400>, <ast.Constant object at 0x7da18dc9a3e0>, <ast.Constant object at 0x7da18dc9a080>, <ast.Constant object at 0x7da18dc981f0>, <ast.Constant object at 0x7da18dc99b40>, <ast.Constant object at 0x7da18dc99a80>, <ast.Constant object at 0x7da18dc98dc0>, <ast.Constant object at 0x7da18dc9b910>]]]].format, parameter[]].strip, parameter[]]
call[name[self]._debug_print, parameter[]]
return[call[name[self].fetch, parameter[name[query]]]] | keyword[def] identifier[data_discovery] ( identifier[self] , identifier[region] , identifier[keywords] = keyword[None] , identifier[regex] = keyword[None] , identifier[time] = keyword[None] ,
identifier[boundaries] = keyword[None] , identifier[include_quantiles] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[region] , identifier[str] ):
keyword[try] :
identifier[countrytag] = literal[string] . identifier[format] (
identifier[get_countrytag] ( identifier[region] ))
identifier[boundary] =( literal[string]
literal[string] )
keyword[except] identifier[ValueError] :
identifier[self] . identifier[sql_client] . identifier[send] (
literal[string] . identifier[format] ( identifier[region] ))
identifier[boundary] =(
literal[string]
literal[string] ). identifier[format] (
identifier[table_name] = identifier[region] )
keyword[elif] identifier[isinstance] ( identifier[region] , identifier[collections] . identifier[Iterable] ):
keyword[if] identifier[len] ( identifier[region] )!= literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[boundary] =( literal[string]
literal[string] . identifier[format] (* identifier[region] ))
keyword[if] identifier[locals] (). identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[countrytag] = literal[string]
keyword[if] identifier[keywords] :
keyword[if] identifier[isinstance] ( identifier[keywords] , identifier[str] ):
identifier[keywords] =[ identifier[keywords] ,]
identifier[kwsearch] = literal[string] . identifier[join] (
( literal[string]
literal[string] ). identifier[format] ( identifier[kw] = identifier[kw] )
keyword[for] identifier[kw] keyword[in] identifier[keywords] )
identifier[kwsearch] = literal[string] . identifier[format] ( identifier[kwsearch] )
keyword[if] identifier[regex] :
identifier[regexsearch] =( literal[string]
literal[string] ). identifier[format] ( identifier[regex] = identifier[utils] . identifier[pgquote] ( identifier[regex] ))
keyword[if] identifier[keywords] keyword[or] identifier[regex] :
identifier[subjectfilters] = literal[string] . identifier[format] (
identifier[kw] = identifier[kwsearch] keyword[if] identifier[keywords] keyword[else] literal[string] ,
identifier[op] = literal[string] keyword[if] ( identifier[keywords] keyword[and] identifier[regex] ) keyword[else] literal[string] ,
identifier[regex] = identifier[regexsearch] keyword[if] identifier[regex] keyword[else] literal[string] ). identifier[strip] ()
keyword[else] :
identifier[subjectfilters] = literal[string]
keyword[if] identifier[isinstance] ( identifier[time] , identifier[str] ) keyword[or] identifier[time] keyword[is] keyword[None] :
identifier[time] =[ identifier[time] ,]
keyword[if] identifier[isinstance] ( identifier[boundaries] , identifier[str] ) keyword[or] identifier[boundaries] keyword[is] keyword[None] :
identifier[boundaries] =[ identifier[boundaries] ,]
keyword[if] identifier[all] ( identifier[time] ) keyword[and] identifier[all] ( identifier[boundaries] ):
identifier[bt_filters] = literal[string]
keyword[elif] identifier[all] ( identifier[time] ) keyword[or] identifier[all] ( identifier[boundaries] ):
identifier[bt_filters] = literal[string] keyword[if] identifier[all] ( identifier[boundaries] ) keyword[else] literal[string]
keyword[else] :
identifier[bt_filters] = literal[string]
keyword[if] identifier[bt_filters] keyword[and] identifier[subjectfilters] :
identifier[filters] = literal[string] . identifier[format] (
identifier[s] = identifier[subjectfilters] , identifier[bt] = identifier[bt_filters] )
keyword[elif] identifier[bt_filters] keyword[or] identifier[subjectfilters] :
identifier[filters] = literal[string] . identifier[format] ( identifier[f] = identifier[subjectfilters] keyword[or] identifier[bt_filters] )
keyword[else] :
identifier[filters] = literal[string]
identifier[quantiles] =( literal[string]
keyword[if] keyword[not] identifier[include_quantiles] keyword[else] literal[string] )
identifier[numer_query] = identifier[utils] . identifier[minify_sql] ((
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,)). identifier[strip] ()
identifier[numers] = literal[string] . identifier[join] (
identifier[numer_query] . identifier[format] (
identifier[timespan] = identifier[utils] . identifier[pgquote] ( identifier[t] ),
identifier[geom_id] = identifier[utils] . identifier[pgquote] ( identifier[b] ),
identifier[normalization] = identifier[utils] . identifier[pgquote] ( identifier[n] ),
identifier[countrytag] = identifier[countrytag] ,
identifier[filters] = identifier[filters] )
keyword[for] identifier[t] keyword[in] identifier[time]
keyword[for] identifier[b] keyword[in] identifier[boundaries]
keyword[for] identifier[n] keyword[in] ( literal[string] , keyword[None] ))
identifier[query] = identifier[utils] . identifier[minify_sql] ((
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,)). identifier[format] (
identifier[boundary] = identifier[boundary] ,
identifier[numers] = identifier[numers] ,
identifier[quantiles] = identifier[quantiles] ). identifier[strip] ()
identifier[self] . identifier[_debug_print] ( identifier[query] = identifier[query] )
keyword[return] identifier[self] . identifier[fetch] ( identifier[query] , identifier[decode_geom] = keyword[True] ) | def data_discovery(self, region, keywords=None, regex=None, time=None, boundaries=None, include_quantiles=False):
"""Discover Data Observatory measures. This method returns the full
Data Observatory metadata model for each measure or measures that
match the conditions from the inputs. The full metadata in each row
uniquely defines a measure based on the timespan, geographic
resolution, and normalization (if any). Read more about the metadata
response in `Data Observatory
<https://carto.com/docs/carto-engine/data/measures-functions/#obs_getmetaextent-geometry-metadata-json-max_timespan_rank-max_score_rank-target_geoms>`__
documentation.
Internally, this method finds all measures in `region` that match the
conditions set in `keywords`, `regex`, `time`, and `boundaries` (if
any of them are specified). Then, if `boundaries` is not specified, a
geographical resolution for that measure will be chosen subject to the
type of region specified:
1. If `region` is a table name, then a geographical resolution that
is roughly equal to `region size / number of subunits`.
2. If `region` is a country name or bounding box, then a geographical
resolution will be chosen roughly equal to `region size / 500`.
Since different measures are in some geographic resolutions and not
others, different geographical resolutions for different measures are
oftentimes returned.
.. tip::
To remove the guesswork in how geographical resolutions are
selected, specify one or more boundaries in `boundaries`. See
the boundaries section for each region in the `Data Observatory
catalog <http://cartodb.github.io/bigmetadata/>`__.
The metadata returned from this method can then be used to create raw
tables or for augmenting an existing table from these measures using
:py:meth:`CartoContext.data <cartoframes.context.CartoContext.data>`.
For the full Data Observatory catalog, visit
https://cartodb.github.io/bigmetadata/. When working with the metadata
DataFrame returned from this method, be careful to only remove rows not
columns as `CartoContext.data <cartoframes.context.CartoContext.data>`
generally needs the full metadata.
.. note::
Narrowing down a discovery query using the `keywords`, `regex`, and
`time` filters is important for getting a manageable metadata
set. Besides there being a large number of measures in the DO, a
metadata response has acceptable combinations of measures with
demonimators (normalization and density), and the same measure from
other years.
For example, setting the region to be United States counties with
no filter values set will result in many thousands of measures.
Examples:
Get all European Union measures that mention ``freight``.
.. code::
meta = cc.data_discovery('European Union',
keywords='freight',
time='2010')
print(meta['numer_name'].values)
Arguments:
region (str or list of float): Information about the region of
interest. `region` can be one of three types:
- region name (str): Name of region of interest. Acceptable
values are limited to: 'Australia', 'Brazil', 'Canada',
'European Union', 'France', 'Mexico', 'Spain',
'United Kingdom', 'United States'.
- table name (str): Name of a table in user's CARTO account
with geometries. The region will be the bounding box of
the table.
.. Note:: If a table name is also a valid Data Observatory
region name, the Data Observatory name will be chosen
over the table.
- bounding box (list of float): List of four values (two
lng/lat pairs) in the following order: western longitude,
southern latitude, eastern longitude, and northern latitude.
For example, Switzerland fits in
``[5.9559111595,45.8179931641,10.4920501709,47.808380127]``
.. Note:: Geometry levels are generally chosen by subdividing
the region into the next smallest administrative unit. To
override this behavior, specify the `boundaries` flag. For
example, set `boundaries` to
``'us.census.tiger.census_tract'`` to choose US census
tracts.
keywords (str or list of str, optional): Keyword or list of
keywords in measure description or name. Response will be matched
on all keywords listed (boolean `or`).
regex (str, optional): A regular expression to search the measure
descriptions and names. Note that this relies on PostgreSQL's
case insensitive operator ``~*``. See `PostgreSQL docs
<https://www.postgresql.org/docs/9.5/static/functions-matching.html>`__
for more information.
boundaries (str or list of str, optional): Boundary or list of
boundaries that specify the measure resolution. See the
boundaries section for each region in the `Data Observatory
catalog <http://cartodb.github.io/bigmetadata/>`__.
include_quantiles (bool, optional): Include quantiles calculations
which are a calculation of how a measure compares to all measures
in the full dataset. Defaults to ``False``. If ``True``,
quantiles columns will be returned for each column which has it
pre-calculated.
Returns:
pandas.DataFrame: A dataframe of the complete metadata model for
specific measures based on the search parameters.
Raises:
ValueError: If `region` is a :obj:`list` and does not consist of
four elements, or if `region` is not an acceptable region
CartoException: If `region` is not a table in user account
"""
if isinstance(region, str):
try:
# see if it's a DO region, nest in {}
countrytag = "'{{{0}}}'".format(get_countrytag(region))
boundary = 'SELECT ST_MakeEnvelope(-180.0, -85.0, 180.0, 85.0, 4326) AS env, 500::int AS cnt' # depends on [control=['try'], data=[]]
except ValueError:
# TODO: make this work for general queries
# see if it's a table
self.sql_client.send('EXPLAIN SELECT * FROM {}'.format(region))
boundary = 'SELECT ST_SetSRID(ST_Extent(the_geom), 4326) AS env, count(*)::int AS cnt FROM {table_name}'.format(table_name=region) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(region, collections.Iterable):
if len(region) != 4:
raise ValueError('`region` should be a list of the geographic bounds of a region in the following order: western longitude, southern latitude, eastern longitude, and northern latitude. For example, Switerland fits in ``[5.9559111595,45.8179931641,10.4920501709,47.808380127]``.') # depends on [control=['if'], data=[]]
boundary = 'SELECT ST_MakeEnvelope({0}, {1}, {2}, {3}, 4326) AS env, 500::int AS cnt'.format(*region) # depends on [control=['if'], data=[]]
if locals().get('countrytag') is None:
countrytag = 'null' # depends on [control=['if'], data=[]]
if keywords:
if isinstance(keywords, str):
keywords = [keywords] # depends on [control=['if'], data=[]]
kwsearch = ' OR '.join(("numer_description ILIKE '%{kw}%' OR numer_name ILIKE '%{kw}%'".format(kw=kw) for kw in keywords))
kwsearch = '({})'.format(kwsearch) # depends on [control=['if'], data=[]]
if regex:
regexsearch = '(numer_description ~* {regex} OR numer_name ~* {regex})'.format(regex=utils.pgquote(regex)) # depends on [control=['if'], data=[]]
if keywords or regex:
subjectfilters = '{kw} {op} {regex}'.format(kw=kwsearch if keywords else '', op='OR' if keywords and regex else '', regex=regexsearch if regex else '').strip() # depends on [control=['if'], data=[]]
else:
subjectfilters = ''
if isinstance(time, str) or time is None:
time = [time] # depends on [control=['if'], data=[]]
if isinstance(boundaries, str) or boundaries is None:
boundaries = [boundaries] # depends on [control=['if'], data=[]]
if all(time) and all(boundaries):
bt_filters = 'valid_geom AND valid_timespan' # depends on [control=['if'], data=[]]
elif all(time) or all(boundaries):
bt_filters = 'valid_geom' if all(boundaries) else 'valid_timespan' # depends on [control=['if'], data=[]]
else:
bt_filters = ''
if bt_filters and subjectfilters:
filters = 'WHERE ({s}) AND ({bt})'.format(s=subjectfilters, bt=bt_filters) # depends on [control=['if'], data=[]]
elif bt_filters or subjectfilters:
filters = 'WHERE {f}'.format(f=subjectfilters or bt_filters) # depends on [control=['if'], data=[]]
else:
filters = ''
quantiles = "WHERE numer_aggregate <> 'quantile'" if not include_quantiles else '' # denom_id
numer_query = utils.minify_sql(('SELECT', ' numer_id,', ' {geom_id} AS geom_id,', ' {timespan} AS numer_timespan,', ' {normalization} AS normalization', ' FROM', ' OBS_GetAvailableNumerators(', ' (SELECT env FROM envelope),', ' {countrytag},', ' null,', ' {geom_id},', ' {timespan})', '{filters}')).strip()
# query all numerators for all `time`, `boundaries`, and raw/derived
numers = '\nUNION\n'.join((numer_query.format(timespan=utils.pgquote(t), geom_id=utils.pgquote(b), normalization=utils.pgquote(n), countrytag=countrytag, filters=filters) for t in time for b in boundaries for n in ('predenominated', None)))
query = utils.minify_sql(('WITH envelope AS (', ' {boundary}', '), numers AS (', ' {numers}', ')', 'SELECT *', 'FROM json_to_recordset(', ' (SELECT OBS_GetMeta(', ' envelope.env,', ' json_agg(numers),', ' 10, 10, envelope.cnt', ' ) AS meta', 'FROM numers, envelope', 'GROUP BY env, cnt)) as data(', ' denom_aggregate text, denom_colname text,', ' denom_description text, denom_geomref_colname text,', ' denom_id text, denom_name text, denom_reltype text,', ' denom_t_description text, denom_tablename text,', ' denom_type text, geom_colname text, geom_description text,', ' geom_geomref_colname text, geom_id text, geom_name text,', ' geom_t_description text, geom_tablename text,', ' geom_timespan text, geom_type text, id numeric,', ' max_score_rank text, max_timespan_rank text,', ' normalization text, num_geoms numeric, numer_aggregate text,', ' numer_colname text, numer_description text,', ' numer_geomref_colname text, numer_id text,', ' numer_name text, numer_t_description text,', ' numer_tablename text, numer_timespan text,', ' numer_type text, score numeric, score_rank numeric,', ' score_rownum numeric, suggested_name text,', ' target_area text, target_geoms text, timespan_rank numeric,', ' timespan_rownum numeric)', '{quantiles}')).format(boundary=boundary, numers=numers, quantiles=quantiles).strip()
self._debug_print(query=query)
return self.fetch(query, decode_geom=True) |
def destroy_sns_event(app_name, env, region):
    """Delete every SNS subscription attached to the given Lambda function.

    Args:
        app_name (str): name of the lambda function
        env (str): Environment/Account for lambda function
        region (str): AWS region of the lambda function

    Returns:
        bool: True once all subscriptions have been removed
    """
    session = boto3.Session(profile_name=env, region_name=region)
    client = session.client('sns')
    # Unsubscribe each subscription currently routed to this function.
    for arn in get_sns_subscriptions(app_name=app_name, env=env, region=region):
        client.unsubscribe(SubscriptionArn=arn)
    LOG.debug("Lambda SNS event deleted")
    return True
constant[ Destroy all Lambda SNS subscriptions.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
Returns:
boolean: True if subscription destroyed successfully
]
variable[session] assign[=] call[name[boto3].Session, parameter[]]
variable[sns_client] assign[=] call[name[session].client, parameter[constant[sns]]]
variable[lambda_subscriptions] assign[=] call[name[get_sns_subscriptions], parameter[]]
for taget[name[subscription_arn]] in starred[name[lambda_subscriptions]] begin[:]
call[name[sns_client].unsubscribe, parameter[]]
call[name[LOG].debug, parameter[constant[Lambda SNS event deleted]]]
return[constant[True]] | keyword[def] identifier[destroy_sns_event] ( identifier[app_name] , identifier[env] , identifier[region] ):
literal[string]
identifier[session] = identifier[boto3] . identifier[Session] ( identifier[profile_name] = identifier[env] , identifier[region_name] = identifier[region] )
identifier[sns_client] = identifier[session] . identifier[client] ( literal[string] )
identifier[lambda_subscriptions] = identifier[get_sns_subscriptions] ( identifier[app_name] = identifier[app_name] , identifier[env] = identifier[env] , identifier[region] = identifier[region] )
keyword[for] identifier[subscription_arn] keyword[in] identifier[lambda_subscriptions] :
identifier[sns_client] . identifier[unsubscribe] ( identifier[SubscriptionArn] = identifier[subscription_arn] )
identifier[LOG] . identifier[debug] ( literal[string] )
keyword[return] keyword[True] | def destroy_sns_event(app_name, env, region):
""" Destroy all Lambda SNS subscriptions.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
Returns:
boolean: True if subscription destroyed successfully
"""
session = boto3.Session(profile_name=env, region_name=region)
sns_client = session.client('sns')
lambda_subscriptions = get_sns_subscriptions(app_name=app_name, env=env, region=region)
for subscription_arn in lambda_subscriptions:
sns_client.unsubscribe(SubscriptionArn=subscription_arn) # depends on [control=['for'], data=['subscription_arn']]
LOG.debug('Lambda SNS event deleted')
return True |
def as_iso8601(self):
    """Render the stored compact time value as an ISO-8601-style string.

    example: 00:38:05.210Z

    Returns None when no time value is present.
    """
    raw = self.__time
    if raw is None:
        return None
    # Split the packed value into hour / minute / second fields.
    hours, minutes, seconds = raw[:2], raw[2:4], raw[4:]
    # The "0Z" suffix restores the documented precision
    # (e.g. seconds "05.21" renders as "05.210Z").
    return '{}:{}:{}0Z'.format(hours, minutes, seconds)
constant[
example: 00:38:05.210Z
]
if compare[name[self].__time is constant[None]] begin[:]
return[constant[None]]
return[binary_operation[constant[%s:%s:%s0Z] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18fe92fb0>, <ast.Subscript object at 0x7da18fe93c10>, <ast.Subscript object at 0x7da18fe93e20>]]]] | keyword[def] identifier[as_iso8601] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__time] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] literal[string] %( identifier[self] . identifier[__time] [: literal[int] ], identifier[self] . identifier[__time] [ literal[int] : literal[int] ], identifier[self] . identifier[__time] [ literal[int] :]) | def as_iso8601(self):
"""
example: 00:38:05.210Z
"""
if self.__time is None:
return None # depends on [control=['if'], data=[]]
return '%s:%s:%s0Z' % (self.__time[:2], self.__time[2:4], self.__time[4:]) |
def query_job_status(self, submissionid):
    """Check whether a vmray submission/job has finished.

    :param submissionid: ID of the job/submission
    :type submissionid: int
    :returns: True if the job finished, False if not
    :rtype: bool
    :raises UnknownSubmissionIdError: when the API does not answer with HTTP 200
    """
    endpoint = '/rest/submission/'
    result = self.session.get('{}{}{}'.format(self.url, endpoint, submissionid))
    # Anything but HTTP 200 is treated as an unknown/invalid submission id.
    if result.status_code != 200:
        raise UnknownSubmissionIdError('Submission id seems invalid, response was not HTTP 200.')
    payload = json.loads(result.text)
    # A missing "data"/"submission_finished" field counts as "not finished yet".
    return bool(payload.get('data', {}).get('submission_finished', False))
constant[
Queries vmray to check id a job was
:param submissionid: ID of the job/submission
:type submissionid: int
:returns: True if job finished, false if not
:rtype: bool
]
variable[apiurl] assign[=] constant[/rest/submission/]
variable[result] assign[=] call[name[self].session.get, parameter[call[constant[{}{}{}].format, parameter[name[self].url, name[apiurl], name[submissionid]]]]]
if compare[name[result].status_code equal[==] constant[200]] begin[:]
variable[submission_info] assign[=] call[name[json].loads, parameter[name[result].text]]
if call[call[name[submission_info].get, parameter[constant[data], dictionary[[], []]]].get, parameter[constant[submission_finished], constant[False]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[query_job_status] ( identifier[self] , identifier[submissionid] ):
literal[string]
identifier[apiurl] = literal[string]
identifier[result] = identifier[self] . identifier[session] . identifier[get] ( literal[string] . identifier[format] ( identifier[self] . identifier[url] , identifier[apiurl] , identifier[submissionid] ))
keyword[if] identifier[result] . identifier[status_code] == literal[int] :
identifier[submission_info] = identifier[json] . identifier[loads] ( identifier[result] . identifier[text] )
keyword[if] identifier[submission_info] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , keyword[False] ):
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[UnknownSubmissionIdError] ( literal[string] )
keyword[return] keyword[False] | def query_job_status(self, submissionid):
"""
Queries vmray to check id a job was
:param submissionid: ID of the job/submission
:type submissionid: int
:returns: True if job finished, false if not
:rtype: bool
"""
apiurl = '/rest/submission/'
result = self.session.get('{}{}{}'.format(self.url, apiurl, submissionid))
if result.status_code == 200:
submission_info = json.loads(result.text)
if submission_info.get('data', {}).get('submission_finished', False): # Or something like that
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise UnknownSubmissionIdError('Submission id seems invalid, response was not HTTP 200.')
return False |
def to_bytes(self):
    """Serialize the binder's annotations into a compressed byte string."""
    # Every per-doc token array must be 2-D before it can be stacked.
    for arr in self.tokens:
        assert len(arr.shape) == 2, arr.shape
    # Key order is kept stable so the serialized payload is reproducible.
    msg = {
        "attrs": self.attrs,
        "tokens": numpy.vstack(self.tokens).tobytes("C"),
        "spaces": numpy.vstack(self.spaces).tobytes("C"),
        "lengths": numpy.asarray([len(arr) for arr in self.tokens], dtype="int32").tobytes("C"),
        "strings": list(self.strings),
    }
    return gzip.compress(srsly.msgpack_dumps(msg))
constant[Serialize the binder's annotations into a byte string.]
for taget[name[tokens]] in starred[name[self].tokens] begin[:]
assert[compare[call[name[len], parameter[name[tokens].shape]] equal[==] constant[2]]]
variable[lengths] assign[=] <ast.ListComp object at 0x7da1b1e15240>
variable[msg] assign[=] dictionary[[<ast.Constant object at 0x7da1b1e154b0>, <ast.Constant object at 0x7da1b1e15c30>, <ast.Constant object at 0x7da1b1e14760>, <ast.Constant object at 0x7da1b1e15030>, <ast.Constant object at 0x7da1b1e17a30>], [<ast.Attribute object at 0x7da1b1e17c70>, <ast.Call object at 0x7da1b1ee88b0>, <ast.Call object at 0x7da1b1eeadd0>, <ast.Call object at 0x7da1b1eeafb0>, <ast.Call object at 0x7da1b1eebe20>]]
return[call[name[gzip].compress, parameter[call[name[srsly].msgpack_dumps, parameter[name[msg]]]]]] | keyword[def] identifier[to_bytes] ( identifier[self] ):
literal[string]
keyword[for] identifier[tokens] keyword[in] identifier[self] . identifier[tokens] :
keyword[assert] identifier[len] ( identifier[tokens] . identifier[shape] )== literal[int] , identifier[tokens] . identifier[shape]
identifier[lengths] =[ identifier[len] ( identifier[tokens] ) keyword[for] identifier[tokens] keyword[in] identifier[self] . identifier[tokens] ]
identifier[msg] ={
literal[string] : identifier[self] . identifier[attrs] ,
literal[string] : identifier[numpy] . identifier[vstack] ( identifier[self] . identifier[tokens] ). identifier[tobytes] ( literal[string] ),
literal[string] : identifier[numpy] . identifier[vstack] ( identifier[self] . identifier[spaces] ). identifier[tobytes] ( literal[string] ),
literal[string] : identifier[numpy] . identifier[asarray] ( identifier[lengths] , identifier[dtype] = literal[string] ). identifier[tobytes] ( literal[string] ),
literal[string] : identifier[list] ( identifier[self] . identifier[strings] ),
}
keyword[return] identifier[gzip] . identifier[compress] ( identifier[srsly] . identifier[msgpack_dumps] ( identifier[msg] )) | def to_bytes(self):
"""Serialize the binder's annotations into a byte string."""
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape # depends on [control=['for'], data=['tokens']]
lengths = [len(tokens) for tokens in self.tokens]
msg = {'attrs': self.attrs, 'tokens': numpy.vstack(self.tokens).tobytes('C'), 'spaces': numpy.vstack(self.spaces).tobytes('C'), 'lengths': numpy.asarray(lengths, dtype='int32').tobytes('C'), 'strings': list(self.strings)}
return gzip.compress(srsly.msgpack_dumps(msg)) |
def symb_to_block(symb, coupling='full'):
    """
    Maps a symbolic factorization to a block-diagonal structure with
    coupling constraints.

    :param symb:     :py:class:`symbolic` factorization object
    :param coupling: 'full' (default) couples every overlapping entry
                     between a clique and its parent; alternatively a
                     cvxopt spmatrix whose nonzero pattern selects which
                     overlapping entries to couple
    :return dims:            list of block dimensions (one per clique)
    :return sparse_to_block: dict mapping sparse index ``col*n + row`` to
                             the position(s) of that entry in the
                             block-diagonal structure
    :return constraints:     list of coupling constraints (tuples of
                             block-diagonal indices that must agree)
    """
    n = len(symb.snode)         # order of block
    Ncliques = len(symb.snpar)  # number of cliques

    # compute clique orders
    dims = [symb.sncolptr[j + 1] - symb.sncolptr[j] for j in range(Ncliques)]

    # compute offsets of each (dense, square) clique block in the
    # block-diagonal structure
    offsets = [0]
    for i in range(Ncliques):
        offsets.append(offsets[-1] + dims[i] ** 2)

    constraints = []      # list of coupling constraints
    sparse_to_block = {}  # conversion dictionary

    for k in range(Ncliques):
        # map nonzeros in {Jk,Nk} part of clique k to block-diagonal structure
        nodes = symb.snode[symb.snptr[k]:symb.snptr[k + 1]]
        rows = symb.snrowidx[symb.sncolptr[k]:symb.sncolptr[k + 1]]
        nk = len(nodes)   # number of nodes in supernode
        wk = len(rows)    # number of nodes in clique
        for j in range(nk):
            for i in range(j, wk):
                if i == j:
                    sparse_to_block[nodes[j] * n + rows[i]] = (offsets[k] + j * wk + i,)
                else:
                    # off-diagonal entries occupy two positions (lower/upper)
                    sparse_to_block[nodes[j] * n + rows[i]] = (
                        offsets[k] + j * wk + i,
                        offsets[k] + i * wk + j)

        # add coupling constraints to list of constraints
        if symb.snpar[k] == k:
            continue  # skip if supernode k is a root supernode
        p = symb.snpar[k]
        wp = symb.sncolptr[p + 1] - symb.sncolptr[p]
        ri = symb.relidx[symb.relptr[k]:symb.relptr[k + 1]]

        def add_constraint(i, j):
            # Couple entry (i, j) of clique k's {Nk,Nk} part with the
            # corresponding entry (ri[i], ri[j]) in parent clique p.
            a = offsets[k] + (j + nk) * wk + i + nk
            b = offsets[p] + ri[j] * wp + ri[i]
            if i == j:
                constraints.append((a, b))
            else:
                constraints.append((a, b,
                                    offsets[k] + (i + nk) * wk + j + nk,
                                    offsets[p] + ri[i] * wp + ri[j]))

        # Check the (default) string case first so the cvxopt ``spmatrix``
        # symbol is only consulted when a matrix coupling is actually passed.
        if not isinstance(coupling, str) and type(coupling) is spmatrix:
            tmp = coupling[rows[nk:], rows[nk:]]
            for i, j in zip(tmp.I, tmp.J):
                add_constraint(i, j)
        elif coupling == 'full':
            for j in range(len(ri)):
                for i in range(j, len(ri)):
                    add_constraint(i, j)

    return dims, sparse_to_block, constraints
constant[
Maps a symbolic factorization to a block-diagonal structure with
coupling constraints.
:param symb: :py:class:`symbolic`
:param coupling: optional
:return dims: list of block dimensions
:return sparse_to_block: dictionary
:return constraints: list of coupling constraints
]
variable[n] assign[=] call[name[len], parameter[name[symb].snode]]
variable[Ncliques] assign[=] call[name[len], parameter[name[symb].snpar]]
variable[dims] assign[=] <ast.ListComp object at 0x7da1b25361a0>
variable[offsets] assign[=] list[[<ast.Constant object at 0x7da1b2535cf0>]]
for taget[name[i]] in starred[call[name[range], parameter[name[Ncliques]]]] begin[:]
call[name[offsets].append, parameter[binary_operation[call[name[offsets]][<ast.UnaryOp object at 0x7da1b25365c0>] + binary_operation[call[name[dims]][name[i]] ** constant[2]]]]]
variable[constraints] assign[=] list[[]]
variable[sparse_to_block] assign[=] dictionary[[], []]
for taget[name[k]] in starred[call[name[range], parameter[name[Ncliques]]]] begin[:]
variable[nodes] assign[=] call[name[symb].snode][<ast.Slice object at 0x7da1b2536470>]
variable[rows] assign[=] call[name[symb].snrowidx][<ast.Slice object at 0x7da1b2536350>]
variable[nk] assign[=] call[name[len], parameter[name[nodes]]]
variable[wk] assign[=] call[name[len], parameter[name[rows]]]
for taget[name[j]] in starred[call[name[range], parameter[name[nk]]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[j], name[wk]]]] begin[:]
if compare[name[i] equal[==] name[j]] begin[:]
call[name[sparse_to_block]][binary_operation[binary_operation[call[name[nodes]][name[j]] * name[n]] + call[name[rows]][name[i]]]] assign[=] tuple[[<ast.BinOp object at 0x7da1b2554fd0>]]
if compare[call[name[symb].snpar][name[k]] equal[==] name[k]] begin[:]
continue
variable[p] assign[=] call[name[symb].snpar][name[k]]
variable[np] assign[=] call[name[len], parameter[call[name[symb].snode][<ast.Slice object at 0x7da1b2524f40>]]]
variable[wp] assign[=] binary_operation[call[name[symb].sncolptr][binary_operation[name[p] + constant[1]]] - call[name[symb].sncolptr][name[p]]]
variable[ri] assign[=] call[name[symb].relidx][<ast.Slice object at 0x7da1b2616620>]
if compare[call[name[type], parameter[name[coupling]]] is name[spmatrix]] begin[:]
variable[tmp] assign[=] call[name[coupling]][tuple[[<ast.Subscript object at 0x7da1b2615e40>, <ast.Subscript object at 0x7da1b2617550>]]]
for taget[tuple[[<ast.Name object at 0x7da1b2614490>, <ast.Name object at 0x7da1b2614280>]]] in starred[call[name[zip], parameter[name[tmp].I, name[tmp].J]]] begin[:]
if compare[name[j] equal[==] name[i]] begin[:]
call[name[constraints].append, parameter[tuple[[<ast.BinOp object at 0x7da1b2617c10>, <ast.BinOp object at 0x7da1b26157b0>]]]]
return[tuple[[<ast.Name object at 0x7da1b25924d0>, <ast.Name object at 0x7da1b2592d70>, <ast.Name object at 0x7da1b2592b00>]]] | keyword[def] identifier[symb_to_block] ( identifier[symb] , identifier[coupling] = literal[string] ):
literal[string]
identifier[n] = identifier[len] ( identifier[symb] . identifier[snode] )
identifier[Ncliques] = identifier[len] ( identifier[symb] . identifier[snpar] )
identifier[dims] =[ identifier[symb] . identifier[sncolptr] [ identifier[j] + literal[int] ]- identifier[symb] . identifier[sncolptr] [ identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[Ncliques] )]
identifier[offsets] =[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[Ncliques] ): identifier[offsets] . identifier[append] ( identifier[offsets] [- literal[int] ]+ identifier[dims] [ identifier[i] ]** literal[int] )
identifier[constraints] =[]
identifier[sparse_to_block] ={}
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[Ncliques] ):
identifier[nodes] = identifier[symb] . identifier[snode] [ identifier[symb] . identifier[snptr] [ identifier[k] ]: identifier[symb] . identifier[snptr] [ identifier[k] + literal[int] ]]
identifier[rows] = identifier[symb] . identifier[snrowidx] [ identifier[symb] . identifier[sncolptr] [ identifier[k] ]: identifier[symb] . identifier[sncolptr] [ identifier[k] + literal[int] ]]
identifier[nk] = identifier[len] ( identifier[nodes] )
identifier[wk] = identifier[len] ( identifier[rows] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[nk] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[j] , identifier[wk] ):
keyword[if] identifier[i] == identifier[j] :
identifier[sparse_to_block] [ identifier[nodes] [ identifier[j] ]* identifier[n] + identifier[rows] [ identifier[i] ]]=( identifier[offsets] [ identifier[k] ]+ identifier[j] * identifier[wk] + identifier[i] ,)
keyword[else] :
identifier[sparse_to_block] [ identifier[nodes] [ identifier[j] ]* identifier[n] + identifier[rows] [ identifier[i] ]]=( identifier[offsets] [ identifier[k] ]+ identifier[j] * identifier[wk] + identifier[i] , identifier[offsets] [ identifier[k] ]+ identifier[i] * identifier[wk] + identifier[j] )
keyword[if] identifier[symb] . identifier[snpar] [ identifier[k] ]== identifier[k] : keyword[continue]
identifier[p] = identifier[symb] . identifier[snpar] [ identifier[k] ]
identifier[np] = identifier[len] ( identifier[symb] . identifier[snode] [ identifier[symb] . identifier[snptr] [ identifier[p] ]: identifier[symb] . identifier[snptr] [ identifier[p] + literal[int] ]])
identifier[wp] = identifier[symb] . identifier[sncolptr] [ identifier[p] + literal[int] ]- identifier[symb] . identifier[sncolptr] [ identifier[p] ]
identifier[ri] = identifier[symb] . identifier[relidx] [ identifier[symb] . identifier[relptr] [ identifier[k] ]: identifier[symb] . identifier[relptr] [ identifier[k] + literal[int] ]]
keyword[if] identifier[type] ( identifier[coupling] ) keyword[is] identifier[spmatrix] :
identifier[tmp] = identifier[coupling] [ identifier[rows] [ identifier[nk] :], identifier[rows] [ identifier[nk] :]]
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[zip] ( identifier[tmp] . identifier[I] , identifier[tmp] . identifier[J] ):
keyword[if] identifier[j] == identifier[i] :
identifier[constraints] . identifier[append] (( identifier[offsets] [ identifier[k] ]+( identifier[j] + identifier[nk] )* identifier[wk] + identifier[i] + identifier[nk] ,
identifier[offsets] [ identifier[p] ]+ identifier[ri] [ identifier[j] ]* identifier[wp] + identifier[ri] [ identifier[i] ]))
keyword[else] :
identifier[constraints] . identifier[append] (( identifier[offsets] [ identifier[k] ]+( identifier[j] + identifier[nk] )* identifier[wk] + identifier[i] + identifier[nk] ,
identifier[offsets] [ identifier[p] ]+ identifier[ri] [ identifier[j] ]* identifier[wp] + identifier[ri] [ identifier[i] ],
identifier[offsets] [ identifier[k] ]+( identifier[i] + identifier[nk] )* identifier[wk] + identifier[j] + identifier[nk] ,
identifier[offsets] [ identifier[p] ]+ identifier[ri] [ identifier[i] ]* identifier[wp] + identifier[ri] [ identifier[j] ]))
keyword[elif] identifier[coupling] == literal[string] :
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[ri] )):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[j] , identifier[len] ( identifier[ri] )):
keyword[if] identifier[j] == identifier[i] :
identifier[constraints] . identifier[append] (( identifier[offsets] [ identifier[k] ]+( identifier[j] + identifier[nk] )* identifier[wk] + identifier[i] + identifier[nk] ,
identifier[offsets] [ identifier[p] ]+ identifier[ri] [ identifier[j] ]* identifier[wp] + identifier[ri] [ identifier[i] ]))
keyword[else] :
identifier[constraints] . identifier[append] (( identifier[offsets] [ identifier[k] ]+( identifier[j] + identifier[nk] )* identifier[wk] + identifier[i] + identifier[nk] ,
identifier[offsets] [ identifier[p] ]+ identifier[ri] [ identifier[j] ]* identifier[wp] + identifier[ri] [ identifier[i] ],
identifier[offsets] [ identifier[k] ]+( identifier[i] + identifier[nk] )* identifier[wk] + identifier[j] + identifier[nk] ,
identifier[offsets] [ identifier[p] ]+ identifier[ri] [ identifier[i] ]* identifier[wp] + identifier[ri] [ identifier[j] ]))
keyword[return] identifier[dims] , identifier[sparse_to_block] , identifier[constraints] | def symb_to_block(symb, coupling='full'):
"""
Maps a symbolic factorization to a block-diagonal structure with
coupling constraints.
:param symb: :py:class:`symbolic`
:param coupling: optional
:return dims: list of block dimensions
:return sparse_to_block: dictionary
:return constraints: list of coupling constraints
"""
n = len(symb.snode) # order of block
Ncliques = len(symb.snpar) # number of cliques
# compute clique orders
dims = [symb.sncolptr[j + 1] - symb.sncolptr[j] for j in range(Ncliques)]
# compute offsets in block-diagonal structure
offsets = [0]
for i in range(Ncliques):
offsets.append(offsets[-1] + dims[i] ** 2) # depends on [control=['for'], data=['i']]
constraints = [] # list of coupling constraints
sparse_to_block = {} # conversion dictionary
for k in range(Ncliques):
# map nonzeros in {Jk,Nk} part of clique k to block-diagonal structure
nodes = symb.snode[symb.snptr[k]:symb.snptr[k + 1]]
rows = symb.snrowidx[symb.sncolptr[k]:symb.sncolptr[k + 1]]
nk = len(nodes) # number of nodes in supernode
wk = len(rows) # number of nodes in clique
for j in range(nk):
for i in range(j, wk):
if i == j:
sparse_to_block[nodes[j] * n + rows[i]] = (offsets[k] + j * wk + i,) # depends on [control=['if'], data=['i', 'j']]
else:
sparse_to_block[nodes[j] * n + rows[i]] = (offsets[k] + j * wk + i, offsets[k] + i * wk + j) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['j']]
# add coupling constraints to list of constraints
if symb.snpar[k] == k:
continue # skip if supernode k is a root supernode # depends on [control=['if'], data=[]]
p = symb.snpar[k]
np = len(symb.snode[symb.snptr[p]:symb.snptr[p + 1]])
wp = symb.sncolptr[p + 1] - symb.sncolptr[p]
ri = symb.relidx[symb.relptr[k]:symb.relptr[k + 1]]
if type(coupling) is spmatrix:
tmp = coupling[rows[nk:], rows[nk:]]
for (i, j) in zip(tmp.I, tmp.J):
if j == i:
constraints.append((offsets[k] + (j + nk) * wk + i + nk, offsets[p] + ri[j] * wp + ri[i])) # depends on [control=['if'], data=['j', 'i']]
else:
constraints.append((offsets[k] + (j + nk) * wk + i + nk, offsets[p] + ri[j] * wp + ri[i], offsets[k] + (i + nk) * wk + j + nk, offsets[p] + ri[i] * wp + ri[j])) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif coupling == 'full':
for j in range(len(ri)):
for i in range(j, len(ri)):
if j == i:
constraints.append((offsets[k] + (j + nk) * wk + i + nk, offsets[p] + ri[j] * wp + ri[i])) # depends on [control=['if'], data=['j', 'i']]
else:
constraints.append((offsets[k] + (j + nk) * wk + i + nk, offsets[p] + ri[j] * wp + ri[i], offsets[k] + (i + nk) * wk + j + nk, offsets[p] + ri[i] * wp + ri[j])) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
return (dims, sparse_to_block, constraints) |
def initialize():
    """Initialize the application configuration, adding any missing default configuration or roles

    Populates the configuration database with the built-in default namespaces
    and options, registers the default options declared by every loaded plugin
    (command plugins excluded), creates any missing default roles and imports
    missing or updated templates. The function is idempotent per process: only
    the first call does any work.

    Returns:
        `None`
    """
    global __initialized
    if __initialized:
        return

    try:
        # Setup all the default base settings
        for data in DEFAULT_CONFIG_OPTIONS:
            nsobj = _get_config_namespace(data['prefix'], data['name'], sort_order=data['sort_order'])
            for opt in data['options']:
                _register_default_option(nsobj, opt)

            db.session.add(nsobj)

        # Iterate over all of our plugins and setup their defaults
        for ns, info in CINQ_PLUGINS.items():
            if info['name'] == 'commands':
                continue

            for entry_point in info['plugins']:
                _cls = entry_point.load()
                if hasattr(_cls, 'ns'):
                    ns_name = '{}: {}'.format(info['name'].capitalize(), _cls.name)

                    # Plugins that never override the abstract `options` property
                    # have nothing to register; skip them
                    if not isinstance(_cls.options, abstractproperty):
                        nsobj = _get_config_namespace(_cls.ns, ns_name)
                        if _cls.options:
                            for opt in _cls.options:
                                _register_default_option(nsobj, opt)

                        db.session.add(nsobj)

        # Create the default roles if they are missing and import any missing or updated
        # templates, if they haven't been modified by the user
        _add_default_roles()
        _import_templates()

        db.session.commit()
        dbconfig.reload_data()
        __initialized = True
    except ProgrammingError as ex:
        # MySQL error 1146 == "table doesn't exist": the schema has not been
        # created yet, so give the operator an actionable hint
        if str(ex).find('1146') != -1:
            logging.getLogger('cloud_inquisitor').error(
                'Missing required tables, please make sure you run `cloud-inquisitor db upgrade`'
            )
        else:
            # Previously any other ProgrammingError was silently swallowed,
            # hiding genuine schema/SQL problems; surface them to the caller
            raise
) | def function[initialize, parameter[]]:
constant[Initialize the application configuration, adding any missing default configuration or roles
Returns:
`None`
]
<ast.Global object at 0x7da1b20aa680>
if name[__initialized] begin[:]
return[None]
<ast.Try object at 0x7da1b20a9540> | keyword[def] identifier[initialize] ():
literal[string]
keyword[global] identifier[__initialized]
keyword[if] identifier[__initialized] :
keyword[return]
keyword[try] :
keyword[for] identifier[data] keyword[in] identifier[DEFAULT_CONFIG_OPTIONS] :
identifier[nsobj] = identifier[_get_config_namespace] ( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[sort_order] = identifier[data] [ literal[string] ])
keyword[for] identifier[opt] keyword[in] identifier[data] [ literal[string] ]:
identifier[_register_default_option] ( identifier[nsobj] , identifier[opt] )
identifier[db] . identifier[session] . identifier[add] ( identifier[nsobj] )
keyword[for] identifier[ns] , identifier[info] keyword[in] identifier[CINQ_PLUGINS] . identifier[items] ():
keyword[if] identifier[info] [ literal[string] ]== literal[string] :
keyword[continue]
keyword[for] identifier[entry_point] keyword[in] identifier[info] [ literal[string] ]:
identifier[_cls] = identifier[entry_point] . identifier[load] ()
keyword[if] identifier[hasattr] ( identifier[_cls] , literal[string] ):
identifier[ns_name] = literal[string] . identifier[format] ( identifier[info] [ literal[string] ]. identifier[capitalize] (), identifier[_cls] . identifier[name] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[_cls] . identifier[options] , identifier[abstractproperty] ):
identifier[nsobj] = identifier[_get_config_namespace] ( identifier[_cls] . identifier[ns] , identifier[ns_name] )
keyword[if] identifier[_cls] . identifier[options] :
keyword[for] identifier[opt] keyword[in] identifier[_cls] . identifier[options] :
identifier[_register_default_option] ( identifier[nsobj] , identifier[opt] )
identifier[db] . identifier[session] . identifier[add] ( identifier[nsobj] )
identifier[_add_default_roles] ()
identifier[_import_templates] ()
identifier[db] . identifier[session] . identifier[commit] ()
identifier[dbconfig] . identifier[reload_data] ()
identifier[__initialized] = keyword[True]
keyword[except] identifier[ProgrammingError] keyword[as] identifier[ex] :
keyword[if] identifier[str] ( identifier[ex] ). identifier[find] ( literal[string] )!=- literal[int] :
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[error] (
literal[string]
) | def initialize():
"""Initialize the application configuration, adding any missing default configuration or roles
Returns:
`None`
"""
global __initialized
if __initialized:
return # depends on [control=['if'], data=[]]
# Setup all the default base settings
try:
for data in DEFAULT_CONFIG_OPTIONS:
nsobj = _get_config_namespace(data['prefix'], data['name'], sort_order=data['sort_order'])
for opt in data['options']:
_register_default_option(nsobj, opt) # depends on [control=['for'], data=['opt']]
db.session.add(nsobj) # depends on [control=['for'], data=['data']]
# Iterate over all of our plugins and setup their defaults
for (ns, info) in CINQ_PLUGINS.items():
if info['name'] == 'commands':
continue # depends on [control=['if'], data=[]]
for entry_point in info['plugins']:
_cls = entry_point.load()
if hasattr(_cls, 'ns'):
ns_name = '{}: {}'.format(info['name'].capitalize(), _cls.name)
if not isinstance(_cls.options, abstractproperty):
nsobj = _get_config_namespace(_cls.ns, ns_name)
if _cls.options:
for opt in _cls.options:
_register_default_option(nsobj, opt) # depends on [control=['for'], data=['opt']] # depends on [control=['if'], data=[]]
db.session.add(nsobj) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry_point']] # depends on [control=['for'], data=[]]
# Create the default roles if they are missing and import any missing or updated templates,
# if they havent been modified by the user
_add_default_roles()
_import_templates()
db.session.commit()
dbconfig.reload_data()
__initialized = True # depends on [control=['try'], data=[]]
except ProgrammingError as ex:
if str(ex).find('1146') != -1:
logging.getLogger('cloud_inquisitor').error('Missing required tables, please make sure you run `cloud-inquisitor db upgrade`') # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ex']] |
def split_bodies(dataset, label=False):
    """Find, label, and split connected bodies/volumes. This splits
    different connected bodies into blocks in a MultiBlock dataset.
    Parameters
    ----------
    label : bool
        A flag on whether to keep the ID arrays given by the
        ``connectivity`` filter.
    """
    # Label each connected region with its own RegionId
    labeled = dataset.connectivity()
    region_ids = labeled.cell_arrays['RegionId']
    blocks = vtki.MultiBlock()
    for region in np.unique(region_ids):
        # Pull out just the cells belonging to this region
        body = labeled.threshold([region - 0.5, region + 0.5], scalars='RegionId')
        if not label:
            # strange behavior:
            # must use this method rather than deleting from the point_arrays
            # or else object is collected.
            body._remove_cell_scalar('RegionId')
            body._remove_point_scalar('RegionId')
        blocks.append(body)
    return blocks
constant[Find, label, and split connected bodies/volumes. This splits
different connected bodies into blocks in a MultiBlock dataset.
Parameters
----------
label : bool
A flag on whether to keep the ID arrays given by the
``connectivity`` filter.
]
variable[labeled] assign[=] call[name[dataset].connectivity, parameter[]]
variable[classifier] assign[=] call[name[labeled].cell_arrays][constant[RegionId]]
variable[bodies] assign[=] call[name[vtki].MultiBlock, parameter[]]
for taget[name[vid]] in starred[call[name[np].unique, parameter[name[classifier]]]] begin[:]
variable[b] assign[=] call[name[labeled].threshold, parameter[list[[<ast.BinOp object at 0x7da1b26af4c0>, <ast.BinOp object at 0x7da1b26ae200>]]]]
if <ast.UnaryOp object at 0x7da1b26adb40> begin[:]
call[name[b]._remove_cell_scalar, parameter[constant[RegionId]]]
call[name[b]._remove_point_scalar, parameter[constant[RegionId]]]
call[name[bodies].append, parameter[name[b]]]
return[name[bodies]] | keyword[def] identifier[split_bodies] ( identifier[dataset] , identifier[label] = keyword[False] ):
literal[string]
identifier[labeled] = identifier[dataset] . identifier[connectivity] ()
identifier[classifier] = identifier[labeled] . identifier[cell_arrays] [ literal[string] ]
identifier[bodies] = identifier[vtki] . identifier[MultiBlock] ()
keyword[for] identifier[vid] keyword[in] identifier[np] . identifier[unique] ( identifier[classifier] ):
identifier[b] = identifier[labeled] . identifier[threshold] ([ identifier[vid] - literal[int] , identifier[vid] + literal[int] ], identifier[scalars] = literal[string] )
keyword[if] keyword[not] identifier[label] :
identifier[b] . identifier[_remove_cell_scalar] ( literal[string] )
identifier[b] . identifier[_remove_point_scalar] ( literal[string] )
identifier[bodies] . identifier[append] ( identifier[b] )
keyword[return] identifier[bodies] | def split_bodies(dataset, label=False):
"""Find, label, and split connected bodies/volumes. This splits
different connected bodies into blocks in a MultiBlock dataset.
Parameters
----------
label : bool
A flag on whether to keep the ID arrays given by the
``connectivity`` filter.
"""
# Get the connectivity and label different bodies
labeled = dataset.connectivity()
classifier = labeled.cell_arrays['RegionId']
bodies = vtki.MultiBlock()
for vid in np.unique(classifier):
# Now extract it:
b = labeled.threshold([vid - 0.5, vid + 0.5], scalars='RegionId')
if not label:
# strange behavior:
# must use this method rather than deleting from the point_arrays
# or else object is collected.
b._remove_cell_scalar('RegionId')
b._remove_point_scalar('RegionId') # depends on [control=['if'], data=[]]
bodies.append(b) # depends on [control=['for'], data=['vid']]
return bodies |
def plot_periodicvar_recovery_results(
precvar_results,
aliases_count_as_recovered=None,
magbins=None,
periodbins=None,
amplitudebins=None,
ndetbins=None,
minbinsize=1,
plotfile_ext='png',
):
'''This plots the results of periodic var recovery.
This function makes plots for periodicvar recovered fraction as a function
of:
- magbin
- periodbin
- amplitude of variability
- ndet
with plot lines broken down by:
- magcol
- periodfinder
- vartype
- recovery status
The kwargs `magbins`, `periodbins`, `amplitudebins`, and `ndetbins` can be
used to set the bin lists as needed. The kwarg `minbinsize` controls how
many elements per bin are required to accept a bin in processing its
recovery characteristics for mags, periods, amplitudes, and ndets.
Parameters
----------
precvar_results : dict or str
This is either a dict returned by parallel_periodicvar_recovery or the
pickle created by that function.
aliases_count_as_recovered : list of str or 'all'
This is used to set which kinds of aliases this function considers as
'recovered' objects. Normally, we require that recovered objects have a
recovery status of 'actual' to indicate the actual period was
recovered. To change this default behavior, aliases_count_as_recovered
can be set to a list of alias status strings that should be considered
as 'recovered' objects as well. Choose from the following alias types::
'twice' recovered_p = 2.0*actual_p
'half' recovered_p = 0.5*actual_p
'ratio_over_1plus' recovered_p = actual_p/(1.0+actual_p)
'ratio_over_1minus' recovered_p = actual_p/(1.0-actual_p)
'ratio_over_1plus_twice' recovered_p = actual_p/(1.0+2.0*actual_p)
'ratio_over_1minus_twice' recovered_p = actual_p/(1.0-2.0*actual_p)
'ratio_over_1plus_thrice' recovered_p = actual_p/(1.0+3.0*actual_p)
'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p)
'ratio_over_minus1' recovered_p = actual_p/(actual_p - 1.0)
'ratio_over_twice_minus1' recovered_p = actual_p/(2.0*actual_p - 1.0)
or set `aliases_count_as_recovered='all'` to include all of the above in
the 'recovered' periodic var list.
magbins : np.array
The magnitude bins to plot the recovery rate results over. If None, the
default mag bins will be used: `np.arange(8.0,16.25,0.25)`.
periodbins : np.array
The period bins to plot the recovery rate results over. If None, the
default period bins will be used: `np.arange(0.0,500.0,0.5)`.
amplitudebins : np.array
The variability amplitude bins to plot the recovery rate results
over. If None, the default amplitude bins will be used:
`np.arange(0.0,2.0,0.05)`.
ndetbins : np.array
The ndet bins to plot the recovery rate results over. If None, the
default ndet bins will be used: `np.arange(0.0,60000.0,1000.0)`.
minbinsize : int
The minimum number of objects per bin required to plot a bin and its
recovery fraction on the plot.
plotfile_ext : {'png','pdf'}
Sets the plot output files' extension.
Returns
-------
dict
A dict containing recovery fraction statistics and the paths to each of
the plots made.
'''
# get the result pickle/dict
if isinstance(precvar_results, str) and os.path.exists(precvar_results):
with open(precvar_results,'rb') as infd:
precvar = pickle.load(infd)
elif isinstance(precvar_results, dict):
precvar = precvar_results
else:
LOGERROR('could not understand the input '
'periodic var recovery dict/pickle')
return None
# get the simbasedir and open the fakelc-info.pkl. we'll need the magbins
# definition from here.
simbasedir = precvar['simbasedir']
lcinfof = os.path.join(simbasedir,'fakelcs-info.pkl')
if not os.path.exists(lcinfof):
LOGERROR('fakelcs-info.pkl does not exist in %s, can\'t continue' %
simbasedir)
return None
with open(lcinfof,'rb') as infd:
lcinfo = pickle.load(infd)
# get the magcols, vartypes, sdssr, isvariable flags
magcols = lcinfo['magcols']
objectid = lcinfo['objectid']
ndet = lcinfo['ndet']
sdssr = lcinfo['sdssr']
# get the actual periodic vars
actual_periodicvars = precvar['actual_periodicvars']
# generate lists of objects binned by magbins and periodbins
LOGINFO('getting sdssr and ndet for actual periodic vars...')
# get the sdssr and ndet for all periodic vars
periodicvar_sdssr = []
periodicvar_ndet = []
periodicvar_objectids = []
for pobj in actual_periodicvars:
pobjind = objectid == pobj
periodicvar_objectids.append(pobj)
periodicvar_sdssr.append(sdssr[pobjind])
periodicvar_ndet.append(ndet[pobjind])
periodicvar_sdssr = np.array(periodicvar_sdssr)
periodicvar_objectids = np.array(periodicvar_objectids)
periodicvar_ndet = np.array(periodicvar_ndet)
LOGINFO('getting periods, vartypes, '
'amplitudes, ndet for actual periodic vars...')
# get the periods, vartypes, amplitudes for the actual periodic vars
periodicvar_periods = [
np.asscalar(precvar['details'][x]['actual_varperiod'])
for x in periodicvar_objectids
]
periodicvar_amplitudes = [
np.asscalar(precvar['details'][x]['actual_varamplitude'])
for x in periodicvar_objectids
]
periodicvar_vartypes = [
precvar['details'][x]['actual_vartype'] for x in periodicvar_objectids
]
#
# do the binning
#
# bin by mag
LOGINFO('binning actual periodic vars by magnitude...')
magbinned_sdssr = []
magbinned_periodicvars = []
if not magbins:
magbins = PERIODREC_DEFAULT_MAGBINS
magbininds = np.digitize(np.ravel(periodicvar_sdssr), magbins)
for mbinind, magi in zip(np.unique(magbininds),
range(len(magbins)-1)):
thisbin_periodicvars = periodicvar_objectids[magbininds == mbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
magbinned_sdssr.append((magbins[magi] + magbins[magi+1])/2.0)
magbinned_periodicvars.append(thisbin_periodicvars)
# bin by period
LOGINFO('binning actual periodic vars by period...')
periodbinned_periods = []
periodbinned_periodicvars = []
if not periodbins:
periodbins = PERIODREC_DEFAULT_PERIODBINS
periodbininds = np.digitize(np.ravel(periodicvar_periods), periodbins)
for pbinind, peri in zip(np.unique(periodbininds),
range(len(periodbins)-1)):
thisbin_periodicvars = periodicvar_objectids[periodbininds == pbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
periodbinned_periods.append((periodbins[peri] +
periodbins[peri+1])/2.0)
periodbinned_periodicvars.append(thisbin_periodicvars)
# bin by amplitude of variability
LOGINFO('binning actual periodic vars by variability amplitude...')
amplitudebinned_amplitudes = []
amplitudebinned_periodicvars = []
if not amplitudebins:
amplitudebins = PERIODREC_DEFAULT_AMPBINS
amplitudebininds = np.digitize(np.ravel(np.abs(periodicvar_amplitudes)),
amplitudebins)
for abinind, ampi in zip(np.unique(amplitudebininds),
range(len(amplitudebins)-1)):
thisbin_periodicvars = periodicvar_objectids[
amplitudebininds == abinind
]
if (thisbin_periodicvars.size > (minbinsize-1)):
amplitudebinned_amplitudes.append(
(amplitudebins[ampi] +
amplitudebins[ampi+1])/2.0
)
amplitudebinned_periodicvars.append(thisbin_periodicvars)
# bin by ndet
LOGINFO('binning actual periodic vars by ndet...')
ndetbinned_ndets = []
ndetbinned_periodicvars = []
if not ndetbins:
ndetbins = PERIODREC_DEFAULT_NDETBINS
ndetbininds = np.digitize(np.ravel(periodicvar_ndet), ndetbins)
for nbinind, ndeti in zip(np.unique(ndetbininds),
range(len(ndetbins)-1)):
thisbin_periodicvars = periodicvar_objectids[ndetbininds == nbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
ndetbinned_ndets.append(
(ndetbins[ndeti] +
ndetbins[ndeti+1])/2.0
)
ndetbinned_periodicvars.append(thisbin_periodicvars)
# now figure out what 'recovered' means using the provided
# aliases_count_as_recovered kwarg
recovered_status = ['actual']
if isinstance(aliases_count_as_recovered, list):
for atype in aliases_count_as_recovered:
if atype in ALIAS_TYPES:
recovered_status.append(atype)
else:
LOGWARNING('unknown alias type: %s, skipping' % atype)
elif aliases_count_as_recovered and aliases_count_as_recovered == 'all':
for atype in ALIAS_TYPES[1:]:
recovered_status.append(atype)
# find all the matching objects for these recovered statuses
recovered_periodicvars = np.array(
[precvar['details'][x]['objectid'] for x in precvar['details']
if (precvar['details'][x] is not None and
precvar['details'][x]['best_recovered_status']
in recovered_status)],
dtype=np.unicode_
)
LOGINFO('recovered %s/%s periodic variables (frac: %.3f) with '
'period recovery status: %s' %
(recovered_periodicvars.size,
actual_periodicvars.size,
float(recovered_periodicvars.size/actual_periodicvars.size),
', '.join(recovered_status)))
# get the objects recovered per bin and overall recovery fractions per bin
magbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in magbinned_periodicvars
]
magbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(magbinned_recovered_objects,
magbinned_periodicvars)])
periodbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in periodbinned_periodicvars
]
periodbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(periodbinned_recovered_objects,
periodbinned_periodicvars)])
amplitudebinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in amplitudebinned_periodicvars
]
amplitudebinned_recfrac = np.array(
[float(x.size/y.size) for x,y
in zip(amplitudebinned_recovered_objects,
amplitudebinned_periodicvars)]
)
ndetbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in ndetbinned_periodicvars
]
ndetbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(ndetbinned_recovered_objects,
ndetbinned_periodicvars)])
# convert the bin medians to arrays
magbinned_sdssr = np.array(magbinned_sdssr)
periodbinned_periods = np.array(periodbinned_periods)
amplitudebinned_amplitudes = np.array(amplitudebinned_amplitudes)
ndetbinned_ndets = np.array(ndetbinned_ndets)
# this is the initial output dict
outdict = {
'simbasedir':simbasedir,
'precvar_results':precvar,
'magcols':magcols,
'objectids':objectid,
'ndet':ndet,
'sdssr':sdssr,
'actual_periodicvars':actual_periodicvars,
'recovered_periodicvars':recovered_periodicvars,
'recovery_definition':recovered_status,
# mag binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'magbins':magbins,
'magbinned_mags':magbinned_sdssr,
'magbinned_periodicvars':magbinned_periodicvars,
'magbinned_recoveredvars':magbinned_recovered_objects,
'magbinned_recfrac':magbinned_recfrac,
# period binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'periodbins':periodbins,
'periodbinned_periods':periodbinned_periods,
'periodbinned_periodicvars':periodbinned_periodicvars,
'periodbinned_recoveredvars':periodbinned_recovered_objects,
'periodbinned_recfrac':periodbinned_recfrac,
# amplitude binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'amplitudebins':amplitudebins,
'amplitudebinned_amplitudes':amplitudebinned_amplitudes,
'amplitudebinned_periodicvars':amplitudebinned_periodicvars,
'amplitudebinned_recoveredvars':amplitudebinned_recovered_objects,
'amplitudebinned_recfrac':amplitudebinned_recfrac,
# ndet binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'ndetbins':ndetbins,
'ndetbinned_ndets':ndetbinned_ndets,
'ndetbinned_periodicvars':ndetbinned_periodicvars,
'ndetbinned_recoveredvars':ndetbinned_recovered_objects,
'ndetbinned_recfrac':ndetbinned_recfrac,
}
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
# figure out all alias types
all_aliastypes = recovered_status
# add these to the outdict
outdict['aliastypes'] = all_aliastypes
outdict['pfmethods'] = all_pfmethods
outdict['vartypes'] = all_vartypes
# these are recfracs per-magcol, -vartype, -periodfinder, -aliastype
# binned appropriately by mags, periods, amplitudes, and ndet
# all of these have the shape as the magcols, aliastypes, pfmethods, and
# vartypes lists above.
magbinned_per_magcol_recfracs = []
magbinned_per_vartype_recfracs = []
magbinned_per_pfmethod_recfracs = []
magbinned_per_aliastype_recfracs = []
periodbinned_per_magcol_recfracs = []
periodbinned_per_vartype_recfracs = []
periodbinned_per_pfmethod_recfracs = []
periodbinned_per_aliastype_recfracs = []
amplitudebinned_per_magcol_recfracs = []
amplitudebinned_per_vartype_recfracs = []
amplitudebinned_per_pfmethod_recfracs = []
amplitudebinned_per_aliastype_recfracs = []
ndetbinned_per_magcol_recfracs = []
ndetbinned_per_vartype_recfracs = []
ndetbinned_per_pfmethod_recfracs = []
ndetbinned_per_aliastype_recfracs = []
#
# finally, we do stuff for the plots!
#
recplotdir = os.path.join(simbasedir, 'periodic-variable-recovery-plots')
if not os.path.exists(recplotdir):
os.mkdir(recplotdir)
# 1. recovery-rate by magbin
# 1a. plot of overall recovery rate per magbin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(magbinned_sdssr, magbinned_recfrac,marker='.',ms=0.0)
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1b. plot of recovery rate per magbin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
magbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
magbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1c. plot of recovery rate per magbin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thispf_recvars).size /
magbin_pv.size
)
thispf_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
magbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1d. plot of recovery rate per magbin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisvt_recvars).size /
magbin_pv.size
)
thisvt_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
magbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1e. plot of recovery rate per magbin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thisat_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisat_recvars).size /
magbin_pv.size
)
thisat_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
magbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2. recovery-rate by periodbin
# 2a. plot of overall recovery rate per periodbin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable period [days]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2b. plot of recovery rate per periodbin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
periodbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
periodbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2c. plot of recovery rate per periodbin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thispf_recvars).size /
periodbin_pv.size
)
thispf_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
periodbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2d. plot of recovery rate per periodbin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisvt_recvars).size /
periodbin_pv.size
)
thisvt_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
periodbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2e. plot of recovery rate per periodbin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for periodbin_pv, periodbin_rv in zip(
periodbinned_periodicvars,
periodbinned_recovered_objects
):
thisbin_thisat_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisat_recvars).size /
periodbin_pv.size
)
thisat_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
periodbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3. recovery-rate by amplitude bin
# 3a. plot of overall recovery rate per amplitude bin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable amplitude [mag]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3b. plot of recovery rate per amplitude bin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thismagcol_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
amplitudebin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_magcol_recfracs.append(
np.array(thismagcol_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3c. plot of recovery rate per amplitude bin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thispf_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thispf_recvars).size /
amplitudebin_pv.size
)
thispf_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
amplitudebinned_per_pfmethod_recfracs.append(
np.array(thispf_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3d. plot of recovery rate per amplitude bin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thisvt_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisvt_recvars).size /
amplitudebin_pv.size
)
thisvt_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_vartype_recfracs.append(
np.array(thisvt_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3e. plot of recovery rate per amplitude bin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thisat_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisat_recvars).size /
amplitudebin_pv.size
)
thisat_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_aliastype_recfracs.append(
np.array(thisat_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4. recovery-rate by ndet bin
# 4a. plot of overall recovery rate per ndet bin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable light curve points')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var ndet')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4b. plot of recovery rate per ndet bin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
ndetbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
ndetbinned_per_magcol_recfracs.append(
np.array(thismagcol_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4c. plot of recovery rate per ndet bin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thispf_recvars).size /
ndetbin_pv.size
)
thispf_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
ndetbinned_per_pfmethod_recfracs.append(
np.array(thispf_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4d. plot of recovery rate per ndet bin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] in PERIODIC_VARTYPES)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisvt_recvars).size /
ndetbin_pv.size
)
thisvt_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
ndetbinned_per_vartype_recfracs.append(
np.array(thisvt_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4e. plot of recovery rate per ndet bin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thisat_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisat_recvars).size /
ndetbin_pv.size
)
thisat_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
ndetbinned_per_aliastype_recfracs.append(
np.array(thisat_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# update the lists in the outdict
outdict['magbinned_per_magcol_recfracs'] = (
magbinned_per_magcol_recfracs
)
outdict['magbinned_per_pfmethod_recfracs'] = (
magbinned_per_pfmethod_recfracs
)
outdict['magbinned_per_vartype_recfracs'] = (
magbinned_per_vartype_recfracs
)
outdict['magbinned_per_aliastype_recfracs'] = (
magbinned_per_aliastype_recfracs
)
outdict['periodbinned_per_magcol_recfracs'] = (
periodbinned_per_magcol_recfracs
)
outdict['periodbinned_per_pfmethod_recfracs'] = (
periodbinned_per_pfmethod_recfracs
)
outdict['periodbinned_per_vartype_recfracs'] = (
periodbinned_per_vartype_recfracs
)
outdict['periodbinned_per_aliastype_recfracs'] = (
periodbinned_per_aliastype_recfracs
)
outdict['amplitudebinned_per_magcol_recfracs'] = (
amplitudebinned_per_magcol_recfracs
)
outdict['amplitudebinned_per_pfmethod_recfracs'] = (
amplitudebinned_per_pfmethod_recfracs
)
outdict['amplitudebinned_per_vartype_recfracs'] = (
amplitudebinned_per_vartype_recfracs
)
outdict['amplitudebinned_per_aliastype_recfracs'] = (
amplitudebinned_per_aliastype_recfracs
)
outdict['ndetbinned_per_magcol_recfracs'] = (
ndetbinned_per_magcol_recfracs
)
outdict['ndetbinned_per_pfmethod_recfracs'] = (
ndetbinned_per_pfmethod_recfracs
)
outdict['ndetbinned_per_vartype_recfracs'] = (
ndetbinned_per_vartype_recfracs
)
outdict['ndetbinned_per_aliastype_recfracs'] = (
ndetbinned_per_aliastype_recfracs
)
# get the overall recovered vars per pfmethod
overall_recvars_per_pfmethod = []
for pfm in all_pfmethods:
thispfm_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_pfmethod'] == pfm))
])
overall_recvars_per_pfmethod.append(thispfm_recvars)
# get the overall recovered vars per vartype
overall_recvars_per_vartype = []
for vt in all_vartypes:
thisvt_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['actual_vartype'] == vt))
])
overall_recvars_per_vartype.append(thisvt_recvars)
# get the overall recovered vars per magcol
overall_recvars_per_magcol = []
for mc in magcols:
thismc_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_magcol'] == mc))
])
overall_recvars_per_magcol.append(thismc_recvars)
# get the overall recovered vars per aliastype
overall_recvars_per_aliastype = []
for at in all_aliastypes:
thisat_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_status'] == at))
])
overall_recvars_per_aliastype.append(thisat_recvars)
# update the outdict with these
outdict['overall_recfrac_per_pfmethod'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_pfmethod
])
outdict['overall_recfrac_per_vartype'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_vartype
])
outdict['overall_recfrac_per_magcol'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_magcol
])
outdict['overall_recfrac_per_aliastype'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_aliastype
])
# 5. bar plot of overall recovery rate per pfmethod
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_pfmethods))
xl = all_pfmethods
plt.barh(xt, outdict['overall_recfrac_per_pfmethod'], 0.50)
plt.yticks(xt, xl)
plt.xlabel('period-finding method')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per period-finding method')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 6. bar plot of overall recovery rate per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(magcols))
xl = magcols
plt.barh(xt, outdict['overall_recfrac_per_magcol'], 0.50)
plt.yticks(xt, xl)
plt.xlabel('light curve magnitude column')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per light curve magcol')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-magcol.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 7. bar plot of overall recovery rate per aliastype
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_aliastypes))
xl = all_aliastypes
plt.barh(xt, outdict['overall_recfrac_per_aliastype'], 0.50)
plt.yticks(xt, xl)
plt.xlabel('period recovery status')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per period recovery status')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 8. bar plot of overall recovery rate per vartype
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_vartypes))
xl = all_vartypes
plt.barh(xt, outdict['overall_recfrac_per_vartype'], 0.50)
plt.yticks(xt, xl)
plt.xlabel('periodic variable type')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per periodic variable type')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 9. overall recovered period periodogram for objects that aren't actual
# periodic variables. this effectively should give us the window function of
# the observations
notvariable_recovered_periods = np.concatenate([
precvar['details'][x]['recovery_periods']
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is None)
])
notvariable_recovered_lspvals = np.concatenate([
precvar['details'][x]['recovery_lspvals']
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is None)
])
sortind = np.argsort(notvariable_recovered_periods)
notvariable_recovered_periods = notvariable_recovered_periods[sortind]
notvariable_recovered_lspvals = notvariable_recovered_lspvals[sortind]
outdict['notvariable_recovered_periods'] = notvariable_recovered_periods
outdict['notvariable_recovered_lspvals'] = notvariable_recovered_lspvals
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(notvariable_recovered_periods,
notvariable_recovered_lspvals,
ms=1.0,linestyle='none',marker='.')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('recovered normalized periodogram power')
plt.title('periodogram for actual not-variable objects')
plt.savefig(
os.path.join(recplotdir,
'recovered-periodogram-nonvariables.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 10. overall recovered period histogram for objects marked
# not-variable. this gives us the most common periods
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.hist(notvariable_recovered_periods,bins=np.arange(0.02,300.0,1.0e-3),
histtype='step')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('number of times periods recovered')
plt.title('recovered period histogram for non-variable objects')
plt.savefig(
os.path.join(recplotdir,
'recovered-period-hist-nonvariables.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# at the end, write the outdict to a pickle and return it
outfile = os.path.join(simbasedir, 'periodicvar-recovery-plotresults.pkl')
with open(outfile,'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict | def function[plot_periodicvar_recovery_results, parameter[precvar_results, aliases_count_as_recovered, magbins, periodbins, amplitudebins, ndetbins, minbinsize, plotfile_ext]]:
constant[This plots the results of periodic var recovery.
This function makes plots for periodicvar recovered fraction as a function
of:
- magbin
- periodbin
- amplitude of variability
- ndet
with plot lines broken down by:
- magcol
- periodfinder
- vartype
- recovery status
The kwargs `magbins`, `periodbins`, `amplitudebins`, and `ndetbins` can be
used to set the bin lists as needed. The kwarg `minbinsize` controls how
many elements per bin are required to accept a bin in processing its
recovery characteristics for mags, periods, amplitudes, and ndets.
Parameters
----------
precvar_results : dict or str
This is either a dict returned by parallel_periodicvar_recovery or the
pickle created by that function.
aliases_count_as_recovered : list of str or 'all'
This is used to set which kinds of aliases this function considers as
'recovered' objects. Normally, we require that recovered objects have a
recovery status of 'actual' to indicate the actual period was
recovered. To change this default behavior, aliases_count_as_recovered
can be set to a list of alias status strings that should be considered
as 'recovered' objects as well. Choose from the following alias types::
'twice' recovered_p = 2.0*actual_p
'half' recovered_p = 0.5*actual_p
'ratio_over_1plus' recovered_p = actual_p/(1.0+actual_p)
'ratio_over_1minus' recovered_p = actual_p/(1.0-actual_p)
'ratio_over_1plus_twice' recovered_p = actual_p/(1.0+2.0*actual_p)
'ratio_over_1minus_twice' recovered_p = actual_p/(1.0-2.0*actual_p)
'ratio_over_1plus_thrice' recovered_p = actual_p/(1.0+3.0*actual_p)
'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p)
'ratio_over_minus1' recovered_p = actual_p/(actual_p - 1.0)
'ratio_over_twice_minus1' recovered_p = actual_p/(2.0*actual_p - 1.0)
or set `aliases_count_as_recovered='all'` to include all of the above in
the 'recovered' periodic var list.
magbins : np.array
The magnitude bins to plot the recovery rate results over. If None, the
default mag bins will be used: `np.arange(8.0,16.25,0.25)`.
periodbins : np.array
The period bins to plot the recovery rate results over. If None, the
default period bins will be used: `np.arange(0.0,500.0,0.5)`.
amplitudebins : np.array
The variability amplitude bins to plot the recovery rate results
over. If None, the default amplitude bins will be used:
`np.arange(0.0,2.0,0.05)`.
ndetbins : np.array
The ndet bins to plot the recovery rate results over. If None, the
default ndet bins will be used: `np.arange(0.0,60000.0,1000.0)`.
minbinsize : int
The minimum number of objects per bin required to plot a bin and its
recovery fraction on the plot.
plotfile_ext : {'png','pdf'}
Sets the plot output files' extension.
Returns
-------
dict
A dict containing recovery fraction statistics and the paths to each of
the plots made.
]
if <ast.BoolOp object at 0x7da1aff34a00> begin[:]
with call[name[open], parameter[name[precvar_results], constant[rb]]] begin[:]
variable[precvar] assign[=] call[name[pickle].load, parameter[name[infd]]]
variable[simbasedir] assign[=] call[name[precvar]][constant[simbasedir]]
variable[lcinfof] assign[=] call[name[os].path.join, parameter[name[simbasedir], constant[fakelcs-info.pkl]]]
if <ast.UnaryOp object at 0x7da1b000ffa0> begin[:]
call[name[LOGERROR], parameter[binary_operation[constant[fakelcs-info.pkl does not exist in %s, can't continue] <ast.Mod object at 0x7da2590d6920> name[simbasedir]]]]
return[constant[None]]
with call[name[open], parameter[name[lcinfof], constant[rb]]] begin[:]
variable[lcinfo] assign[=] call[name[pickle].load, parameter[name[infd]]]
variable[magcols] assign[=] call[name[lcinfo]][constant[magcols]]
variable[objectid] assign[=] call[name[lcinfo]][constant[objectid]]
variable[ndet] assign[=] call[name[lcinfo]][constant[ndet]]
variable[sdssr] assign[=] call[name[lcinfo]][constant[sdssr]]
variable[actual_periodicvars] assign[=] call[name[precvar]][constant[actual_periodicvars]]
call[name[LOGINFO], parameter[constant[getting sdssr and ndet for actual periodic vars...]]]
variable[periodicvar_sdssr] assign[=] list[[]]
variable[periodicvar_ndet] assign[=] list[[]]
variable[periodicvar_objectids] assign[=] list[[]]
for taget[name[pobj]] in starred[name[actual_periodicvars]] begin[:]
variable[pobjind] assign[=] compare[name[objectid] equal[==] name[pobj]]
call[name[periodicvar_objectids].append, parameter[name[pobj]]]
call[name[periodicvar_sdssr].append, parameter[call[name[sdssr]][name[pobjind]]]]
call[name[periodicvar_ndet].append, parameter[call[name[ndet]][name[pobjind]]]]
variable[periodicvar_sdssr] assign[=] call[name[np].array, parameter[name[periodicvar_sdssr]]]
variable[periodicvar_objectids] assign[=] call[name[np].array, parameter[name[periodicvar_objectids]]]
variable[periodicvar_ndet] assign[=] call[name[np].array, parameter[name[periodicvar_ndet]]]
call[name[LOGINFO], parameter[constant[getting periods, vartypes, amplitudes, ndet for actual periodic vars...]]]
variable[periodicvar_periods] assign[=] <ast.ListComp object at 0x7da1b000e860>
variable[periodicvar_amplitudes] assign[=] <ast.ListComp object at 0x7da1b000e530>
variable[periodicvar_vartypes] assign[=] <ast.ListComp object at 0x7da1b000e200>
call[name[LOGINFO], parameter[constant[binning actual periodic vars by magnitude...]]]
variable[magbinned_sdssr] assign[=] list[[]]
variable[magbinned_periodicvars] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b000dd80> begin[:]
variable[magbins] assign[=] name[PERIODREC_DEFAULT_MAGBINS]
variable[magbininds] assign[=] call[name[np].digitize, parameter[call[name[np].ravel, parameter[name[periodicvar_sdssr]]], name[magbins]]]
for taget[tuple[[<ast.Name object at 0x7da1b000da50>, <ast.Name object at 0x7da1b000da20>]]] in starred[call[name[zip], parameter[call[name[np].unique, parameter[name[magbininds]]], call[name[range], parameter[binary_operation[call[name[len], parameter[name[magbins]]] - constant[1]]]]]]] begin[:]
variable[thisbin_periodicvars] assign[=] call[name[periodicvar_objectids]][compare[name[magbininds] equal[==] name[mbinind]]]
if compare[name[thisbin_periodicvars].size greater[>] binary_operation[name[minbinsize] - constant[1]]] begin[:]
call[name[magbinned_sdssr].append, parameter[binary_operation[binary_operation[call[name[magbins]][name[magi]] + call[name[magbins]][binary_operation[name[magi] + constant[1]]]] / constant[2.0]]]]
call[name[magbinned_periodicvars].append, parameter[name[thisbin_periodicvars]]]
call[name[LOGINFO], parameter[constant[binning actual periodic vars by period...]]]
variable[periodbinned_periods] assign[=] list[[]]
variable[periodbinned_periodicvars] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b000ce80> begin[:]
variable[periodbins] assign[=] name[PERIODREC_DEFAULT_PERIODBINS]
variable[periodbininds] assign[=] call[name[np].digitize, parameter[call[name[np].ravel, parameter[name[periodicvar_periods]]], name[periodbins]]]
for taget[tuple[[<ast.Name object at 0x7da1b000cb50>, <ast.Name object at 0x7da1b00c9060>]]] in starred[call[name[zip], parameter[call[name[np].unique, parameter[name[periodbininds]]], call[name[range], parameter[binary_operation[call[name[len], parameter[name[periodbins]]] - constant[1]]]]]]] begin[:]
variable[thisbin_periodicvars] assign[=] call[name[periodicvar_objectids]][compare[name[periodbininds] equal[==] name[pbinind]]]
if compare[name[thisbin_periodicvars].size greater[>] binary_operation[name[minbinsize] - constant[1]]] begin[:]
call[name[periodbinned_periods].append, parameter[binary_operation[binary_operation[call[name[periodbins]][name[peri]] + call[name[periodbins]][binary_operation[name[peri] + constant[1]]]] / constant[2.0]]]]
call[name[periodbinned_periodicvars].append, parameter[name[thisbin_periodicvars]]]
call[name[LOGINFO], parameter[constant[binning actual periodic vars by variability amplitude...]]]
variable[amplitudebinned_amplitudes] assign[=] list[[]]
variable[amplitudebinned_periodicvars] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b009b5e0> begin[:]
variable[amplitudebins] assign[=] name[PERIODREC_DEFAULT_AMPBINS]
variable[amplitudebininds] assign[=] call[name[np].digitize, parameter[call[name[np].ravel, parameter[call[name[np].abs, parameter[name[periodicvar_amplitudes]]]]], name[amplitudebins]]]
for taget[tuple[[<ast.Name object at 0x7da1b009b220>, <ast.Name object at 0x7da1b009b1f0>]]] in starred[call[name[zip], parameter[call[name[np].unique, parameter[name[amplitudebininds]]], call[name[range], parameter[binary_operation[call[name[len], parameter[name[amplitudebins]]] - constant[1]]]]]]] begin[:]
variable[thisbin_periodicvars] assign[=] call[name[periodicvar_objectids]][compare[name[amplitudebininds] equal[==] name[abinind]]]
if compare[name[thisbin_periodicvars].size greater[>] binary_operation[name[minbinsize] - constant[1]]] begin[:]
call[name[amplitudebinned_amplitudes].append, parameter[binary_operation[binary_operation[call[name[amplitudebins]][name[ampi]] + call[name[amplitudebins]][binary_operation[name[ampi] + constant[1]]]] / constant[2.0]]]]
call[name[amplitudebinned_periodicvars].append, parameter[name[thisbin_periodicvars]]]
call[name[LOGINFO], parameter[constant[binning actual periodic vars by ndet...]]]
variable[ndetbinned_ndets] assign[=] list[[]]
variable[ndetbinned_periodicvars] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da1b009a650> begin[:]
variable[ndetbins] assign[=] name[PERIODREC_DEFAULT_NDETBINS]
variable[ndetbininds] assign[=] call[name[np].digitize, parameter[call[name[np].ravel, parameter[name[periodicvar_ndet]]], name[ndetbins]]]
for taget[tuple[[<ast.Name object at 0x7da1b009a320>, <ast.Name object at 0x7da1b009a2f0>]]] in starred[call[name[zip], parameter[call[name[np].unique, parameter[name[ndetbininds]]], call[name[range], parameter[binary_operation[call[name[len], parameter[name[ndetbins]]] - constant[1]]]]]]] begin[:]
variable[thisbin_periodicvars] assign[=] call[name[periodicvar_objectids]][compare[name[ndetbininds] equal[==] name[nbinind]]]
if compare[name[thisbin_periodicvars].size greater[>] binary_operation[name[minbinsize] - constant[1]]] begin[:]
call[name[ndetbinned_ndets].append, parameter[binary_operation[binary_operation[call[name[ndetbins]][name[ndeti]] + call[name[ndetbins]][binary_operation[name[ndeti] + constant[1]]]] / constant[2.0]]]]
call[name[ndetbinned_periodicvars].append, parameter[name[thisbin_periodicvars]]]
variable[recovered_status] assign[=] list[[<ast.Constant object at 0x7da1b0099900>]]
if call[name[isinstance], parameter[name[aliases_count_as_recovered], name[list]]] begin[:]
for taget[name[atype]] in starred[name[aliases_count_as_recovered]] begin[:]
if compare[name[atype] in name[ALIAS_TYPES]] begin[:]
call[name[recovered_status].append, parameter[name[atype]]]
variable[recovered_periodicvars] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b0098fd0>]]
call[name[LOGINFO], parameter[binary_operation[constant[recovered %s/%s periodic variables (frac: %.3f) with period recovery status: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0098850>, <ast.Attribute object at 0x7da1b00987f0>, <ast.Call object at 0x7da1b0098790>, <ast.Call object at 0x7da1b0098640>]]]]]
variable[magbinned_recovered_objects] assign[=] <ast.ListComp object at 0x7da1b00984f0>
variable[magbinned_recfrac] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b0098220>]]
variable[periodbinned_recovered_objects] assign[=] <ast.ListComp object at 0x7da1b00f8790>
variable[periodbinned_recfrac] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b00f84c0>]]
variable[amplitudebinned_recovered_objects] assign[=] <ast.ListComp object at 0x7da1b00f8130>
variable[amplitudebinned_recfrac] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b01d7430>]]
variable[ndetbinned_recovered_objects] assign[=] <ast.ListComp object at 0x7da1b01d6500>
variable[ndetbinned_recfrac] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b01d7be0>]]
variable[magbinned_sdssr] assign[=] call[name[np].array, parameter[name[magbinned_sdssr]]]
variable[periodbinned_periods] assign[=] call[name[np].array, parameter[name[periodbinned_periods]]]
variable[amplitudebinned_amplitudes] assign[=] call[name[np].array, parameter[name[amplitudebinned_amplitudes]]]
variable[ndetbinned_ndets] assign[=] call[name[np].array, parameter[name[ndetbinned_ndets]]]
variable[outdict] assign[=] dictionary[[<ast.Constant object at 0x7da1b01d6770>, <ast.Constant object at 0x7da1b01d4af0>, <ast.Constant object at 0x7da1b01d5630>, <ast.Constant object at 0x7da1b01d5240>, <ast.Constant object at 0x7da1b01c49d0>, <ast.Constant object at 0x7da1b01c4cd0>, <ast.Constant object at 0x7da1b01c75b0>, <ast.Constant object at 0x7da1b01c5f60>, <ast.Constant object at 0x7da1b01c5450>, <ast.Constant object at 0x7da1b01c4b50>, <ast.Constant object at 0x7da1b01c4c10>, <ast.Constant object at 0x7da1b01c5540>, <ast.Constant object at 0x7da1b01c5630>, <ast.Constant object at 0x7da1b01c4550>, <ast.Constant object at 0x7da1b01c5b70>, <ast.Constant object at 0x7da1b01c7790>, <ast.Constant object at 0x7da1b01c7910>, <ast.Constant object at 0x7da1b01c55d0>, <ast.Constant object at 0x7da1b01c7880>, <ast.Constant object at 0x7da1b01c78e0>, <ast.Constant object at 0x7da1b01c6bf0>, <ast.Constant object at 0x7da1b01c77f0>, <ast.Constant object at 0x7da1b01c7730>, <ast.Constant object at 0x7da1b01c6b90>, <ast.Constant object at 0x7da1b01c6020>, <ast.Constant object at 0x7da1b01c6440>, <ast.Constant object at 0x7da1b01c6890>, <ast.Constant object at 0x7da1b01c6bc0>, <ast.Constant object at 0x7da1b01c6860>], [<ast.Name object at 0x7da1b01c62c0>, <ast.Name object at 0x7da1b01c44c0>, <ast.Name object at 0x7da1b01c6680>, <ast.Name object at 0x7da1b01c7550>, <ast.Name object at 0x7da1b01c5390>, <ast.Name object at 0x7da1b01c76a0>, <ast.Name object at 0x7da1b01c5c60>, <ast.Name object at 0x7da1b01c54e0>, <ast.Name object at 0x7da1b01c6740>, <ast.Name object at 0x7da1b01c5f00>, <ast.Name object at 0x7da1b01c4fd0>, <ast.Name object at 0x7da1b01c7850>, <ast.Name object at 0x7da1b01c7280>, <ast.Name object at 0x7da1b01c6410>, <ast.Name object at 0x7da1b01c72e0>, <ast.Name object at 0x7da1b01c6110>, <ast.Name object at 0x7da1b01c4910>, <ast.Name object at 0x7da1b01c4940>, <ast.Name object at 0x7da1b01c44f0>, <ast.Name object at 0x7da1b01c6470>, <ast.Name object at 
0x7da1b01c6380>, <ast.Name object at 0x7da1b01c50f0>, <ast.Name object at 0x7da1b01c56f0>, <ast.Name object at 0x7da1b01c6650>, <ast.Name object at 0x7da1b01c4be0>, <ast.Name object at 0x7da1b01c47c0>, <ast.Name object at 0x7da1b01c4e80>, <ast.Name object at 0x7da1b01c4c70>, <ast.Name object at 0x7da1b01c7640>]]
variable[all_pfmethods] assign[=] call[name[np].unique, parameter[call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b01c6770>]]]]
variable[all_vartypes] assign[=] call[name[np].unique, parameter[<ast.ListComp object at 0x7da1b01c5e40>]]
variable[all_aliastypes] assign[=] name[recovered_status]
call[name[outdict]][constant[aliastypes]] assign[=] name[all_aliastypes]
call[name[outdict]][constant[pfmethods]] assign[=] name[all_pfmethods]
call[name[outdict]][constant[vartypes]] assign[=] name[all_vartypes]
variable[magbinned_per_magcol_recfracs] assign[=] list[[]]
variable[magbinned_per_vartype_recfracs] assign[=] list[[]]
variable[magbinned_per_pfmethod_recfracs] assign[=] list[[]]
variable[magbinned_per_aliastype_recfracs] assign[=] list[[]]
variable[periodbinned_per_magcol_recfracs] assign[=] list[[]]
variable[periodbinned_per_vartype_recfracs] assign[=] list[[]]
variable[periodbinned_per_pfmethod_recfracs] assign[=] list[[]]
variable[periodbinned_per_aliastype_recfracs] assign[=] list[[]]
variable[amplitudebinned_per_magcol_recfracs] assign[=] list[[]]
variable[amplitudebinned_per_vartype_recfracs] assign[=] list[[]]
variable[amplitudebinned_per_pfmethod_recfracs] assign[=] list[[]]
variable[amplitudebinned_per_aliastype_recfracs] assign[=] list[[]]
variable[ndetbinned_per_magcol_recfracs] assign[=] list[[]]
variable[ndetbinned_per_vartype_recfracs] assign[=] list[[]]
variable[ndetbinned_per_pfmethod_recfracs] assign[=] list[[]]
variable[ndetbinned_per_aliastype_recfracs] assign[=] list[[]]
variable[recplotdir] assign[=] call[name[os].path.join, parameter[name[simbasedir], constant[periodic-variable-recovery-plots]]]
if <ast.UnaryOp object at 0x7da1b01c7130> begin[:]
call[name[os].mkdir, parameter[name[recplotdir]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
call[name[plt].plot, parameter[name[magbinned_sdssr], name[magbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[overall recovery fraction by periodic var magnitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b00bc040>, <ast.Constant object at 0x7da1b00bcc40>]]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-magnitudes-overall.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
for taget[name[magcol]] in starred[name[magcols]] begin[:]
variable[thismagcol_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c6aa710>, <ast.Name object at 0x7da20c6a98d0>]]] in starred[call[name[zip], parameter[name[magbinned_periodicvars], name[magbinned_recovered_objects]]]] begin[:]
variable[thisbin_thismagcol_recvars] assign[=] <ast.ListComp object at 0x7da20c6a8490>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thismagcol_recvars]]].size / name[magbin_pv].size]
call[name[thismagcol_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[magbinned_sdssr], call[name[np].array, parameter[name[thismagcol_recfracs]]]]]
call[name[magbinned_per_magcol_recfracs].append, parameter[call[name[np].array, parameter[name[thismagcol_recfracs]]]]]
call[name[plt].plot, parameter[name[magbinned_sdssr], name[magbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per magcol recovery fraction by periodic var magnitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b0141f90>, <ast.Constant object at 0x7da1b0143be0>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-magnitudes-magcols.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_pfmethods] assign[=] call[name[np].unique, parameter[call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b01433a0>]]]]
for taget[name[pfm]] in starred[name[all_pfmethods]] begin[:]
variable[thispf_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0142020>, <ast.Name object at 0x7da1b0143e80>]]] in starred[call[name[zip], parameter[name[magbinned_periodicvars], name[magbinned_recovered_objects]]]] begin[:]
variable[thisbin_thispf_recvars] assign[=] <ast.ListComp object at 0x7da1b0140ca0>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thispf_recvars]]].size / name[magbin_pv].size]
call[name[thispf_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[magbinned_sdssr], call[name[np].array, parameter[name[thispf_recfracs]]]]]
call[name[magbinned_per_pfmethod_recfracs].append, parameter[call[name[np].array, parameter[name[thispf_recfracs]]]]]
call[name[plt].plot, parameter[name[magbinned_sdssr], name[magbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per period-finder recovery fraction by periodic var magnitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da2041da860>, <ast.Constant object at 0x7da2041d9c60>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-magnitudes-pfmethod.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_vartypes] assign[=] call[name[np].unique, parameter[<ast.ListComp object at 0x7da1b007eec0>]]
for taget[name[vt]] in starred[name[all_vartypes]] begin[:]
variable[thisvt_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b007e620>, <ast.Name object at 0x7da1b007e0b0>]]] in starred[call[name[zip], parameter[name[magbinned_periodicvars], name[magbinned_recovered_objects]]]] begin[:]
variable[thisbin_thisvt_recvars] assign[=] <ast.ListComp object at 0x7da1b007e2f0>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thisvt_recvars]]].size / name[magbin_pv].size]
call[name[thisvt_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[magbinned_sdssr], call[name[np].array, parameter[name[thisvt_recfracs]]]]]
call[name[magbinned_per_vartype_recfracs].append, parameter[call[name[np].array, parameter[name[thisvt_recfracs]]]]]
call[name[plt].plot, parameter[name[magbinned_sdssr], name[magbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per vartype recovery fraction by periodic var magnitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b00371f0>, <ast.Constant object at 0x7da1b0037010>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-magnitudes-vartype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_aliastypes] assign[=] name[recovered_status]
for taget[name[at]] in starred[name[all_aliastypes]] begin[:]
variable[thisat_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b00362c0>, <ast.Name object at 0x7da1b0037130>]]] in starred[call[name[zip], parameter[name[magbinned_periodicvars], name[magbinned_recovered_objects]]]] begin[:]
variable[thisbin_thisat_recvars] assign[=] <ast.ListComp object at 0x7da1b00364a0>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thisat_recvars]]].size / name[magbin_pv].size]
call[name[thisat_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[magbinned_sdssr], call[name[np].array, parameter[name[thisat_recfracs]]]]]
call[name[magbinned_per_aliastype_recfracs].append, parameter[call[name[np].array, parameter[name[thisat_recfracs]]]]]
call[name[plt].plot, parameter[name[magbinned_sdssr], name[magbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per alias-type recovery fraction by periodic var magnitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b0064730>, <ast.Constant object at 0x7da1b0066a10>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-magnitudes-aliastype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
call[name[plt].plot, parameter[name[periodbinned_periods], name[periodbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[periodic variable period [days]]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[overall recovery fraction by periodic var periods]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b0064a90>, <ast.Constant object at 0x7da1b0064c40>]]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-periods-overall.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
for taget[name[magcol]] in starred[name[magcols]] begin[:]
variable[thismagcol_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b00e6a10>, <ast.Name object at 0x7da1b00e52a0>]]] in starred[call[name[zip], parameter[name[periodbinned_periodicvars], name[periodbinned_recovered_objects]]]] begin[:]
variable[thisbin_thismagcol_recvars] assign[=] <ast.ListComp object at 0x7da1b00e52d0>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thismagcol_recvars]]].size / name[periodbin_pv].size]
call[name[thismagcol_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[periodbinned_periods], call[name[np].array, parameter[name[thismagcol_recfracs]]]]]
call[name[periodbinned_per_magcol_recfracs].append, parameter[call[name[np].array, parameter[name[thismagcol_recfracs]]]]]
call[name[plt].plot, parameter[name[periodbinned_periods], name[periodbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per magcol recovery fraction by periodic var periods]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b00e69e0>, <ast.Constant object at 0x7da1b00ff9d0>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-periods-magcols.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_pfmethods] assign[=] call[name[np].unique, parameter[call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b012dd50>]]]]
for taget[name[pfm]] in starred[name[all_pfmethods]] begin[:]
variable[thispf_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff4f340>, <ast.Name object at 0x7da1aff4ee60>]]] in starred[call[name[zip], parameter[name[periodbinned_periodicvars], name[periodbinned_recovered_objects]]]] begin[:]
variable[thisbin_thispf_recvars] assign[=] <ast.ListComp object at 0x7da1aff4c340>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thispf_recvars]]].size / name[periodbin_pv].size]
call[name[thispf_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[periodbinned_periods], call[name[np].array, parameter[name[thispf_recfracs]]]]]
call[name[periodbinned_per_pfmethod_recfracs].append, parameter[call[name[np].array, parameter[name[thispf_recfracs]]]]]
call[name[plt].plot, parameter[name[periodbinned_periods], name[periodbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per period-finder recovery fraction by periodic var periods]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1aff4d270>, <ast.Constant object at 0x7da1aff4d2d0>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-periods-pfmethod.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_vartypes] assign[=] call[name[np].unique, parameter[<ast.ListComp object at 0x7da1aff3b880>]]
for taget[name[vt]] in starred[name[all_vartypes]] begin[:]
variable[thisvt_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff3b2b0>, <ast.Name object at 0x7da1aff3b280>]]] in starred[call[name[zip], parameter[name[periodbinned_periodicvars], name[periodbinned_recovered_objects]]]] begin[:]
variable[thisbin_thisvt_recvars] assign[=] <ast.ListComp object at 0x7da1aff3b130>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thisvt_recvars]]].size / name[periodbin_pv].size]
call[name[thisvt_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[periodbinned_periods], call[name[np].array, parameter[name[thisvt_recfracs]]]]]
call[name[periodbinned_per_vartype_recfracs].append, parameter[call[name[np].array, parameter[name[thisvt_recfracs]]]]]
call[name[plt].plot, parameter[name[periodbinned_periods], name[periodbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per vartype recovery fraction by periodic var magnitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1aff39f30>, <ast.Constant object at 0x7da1aff39f00>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-periods-vartype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_aliastypes] assign[=] name[recovered_status]
for taget[name[at]] in starred[name[all_aliastypes]] begin[:]
variable[thisat_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff39480>, <ast.Name object at 0x7da1aff39450>]]] in starred[call[name[zip], parameter[name[periodbinned_periodicvars], name[periodbinned_recovered_objects]]]] begin[:]
variable[thisbin_thisat_recvars] assign[=] <ast.ListComp object at 0x7da1aff39300>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thisat_recvars]]].size / name[periodbin_pv].size]
call[name[thisat_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[periodbinned_periods], call[name[np].array, parameter[name[thisat_recfracs]]]]]
call[name[periodbinned_per_aliastype_recfracs].append, parameter[call[name[np].array, parameter[name[thisat_recfracs]]]]]
call[name[plt].plot, parameter[name[periodbinned_periods], name[periodbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per alias-type recovery fraction by periodic var magnitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1aff380a0>, <ast.Constant object at 0x7da1aff38070>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-periods-aliastype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], name[amplitudebinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[periodic variable amplitude [mag]]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[overall recovery fraction by periodic var amplitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1aff4b400>, <ast.Constant object at 0x7da1aff4b040>]]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-amplitudes-overall.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
for taget[name[magcol]] in starred[name[magcols]] begin[:]
variable[thismagcol_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff4a140>, <ast.Name object at 0x7da1aff49f00>]]] in starred[call[name[zip], parameter[name[amplitudebinned_periodicvars], name[amplitudebinned_recovered_objects]]]] begin[:]
variable[thisbin_thismagcol_recvars] assign[=] <ast.ListComp object at 0x7da1aff48130>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thismagcol_recvars]]].size / name[amplitudebin_pv].size]
call[name[thismagcol_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], call[name[np].array, parameter[name[thismagcol_recfracs]]]]]
call[name[amplitudebinned_per_magcol_recfracs].append, parameter[call[name[np].array, parameter[name[thismagcol_recfracs]]]]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], name[amplitudebinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per magcol recovery fraction by periodic var amplitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1aff48a60>, <ast.Constant object at 0x7da1aff48a90>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-amplitudes-magcols.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_pfmethods] assign[=] call[name[np].unique, parameter[call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b012e170>]]]]
for taget[name[pfm]] in starred[name[all_pfmethods]] begin[:]
variable[thispf_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b012e4d0>, <ast.Name object at 0x7da1b012e2c0>]]] in starred[call[name[zip], parameter[name[amplitudebinned_periodicvars], name[amplitudebinned_recovered_objects]]]] begin[:]
variable[thisbin_thispf_recvars] assign[=] <ast.ListComp object at 0x7da1b012e710>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thispf_recvars]]].size / name[amplitudebin_pv].size]
call[name[thispf_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], call[name[np].array, parameter[name[thispf_recfracs]]]]]
call[name[amplitudebinned_per_pfmethod_recfracs].append, parameter[call[name[np].array, parameter[name[thispf_recfracs]]]]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], name[amplitudebinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per period-finder recovery fraction by periodic var amplitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b00feaa0>, <ast.Constant object at 0x7da1b00ff850>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-amplitudes-pfmethod.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_vartypes] assign[=] call[name[np].unique, parameter[<ast.ListComp object at 0x7da1b00135e0>]]
for taget[name[vt]] in starred[name[all_vartypes]] begin[:]
variable[thisvt_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0013d90>, <ast.Name object at 0x7da1b0013ca0>]]] in starred[call[name[zip], parameter[name[amplitudebinned_periodicvars], name[amplitudebinned_recovered_objects]]]] begin[:]
variable[thisbin_thisvt_recvars] assign[=] <ast.ListComp object at 0x7da1b0013f40>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thisvt_recvars]]].size / name[amplitudebin_pv].size]
call[name[thisvt_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], call[name[np].array, parameter[name[thisvt_recfracs]]]]]
call[name[amplitudebinned_per_vartype_recfracs].append, parameter[call[name[np].array, parameter[name[thisvt_recfracs]]]]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], name[amplitudebinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per vartype recovery fraction by periodic var amplitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b0006c20>, <ast.Constant object at 0x7da1b0006980>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-amplitudes-vartype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_aliastypes] assign[=] name[recovered_status]
for taget[name[at]] in starred[name[all_aliastypes]] begin[:]
variable[thisat_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0007760>, <ast.Name object at 0x7da1b0006500>]]] in starred[call[name[zip], parameter[name[amplitudebinned_periodicvars], name[amplitudebinned_recovered_objects]]]] begin[:]
variable[thisbin_thisat_recvars] assign[=] <ast.ListComp object at 0x7da1b0006b90>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thisat_recvars]]].size / name[amplitudebin_pv].size]
call[name[thisat_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], call[name[np].array, parameter[name[thisat_recfracs]]]]]
call[name[amplitudebinned_per_aliastype_recfracs].append, parameter[call[name[np].array, parameter[name[thisat_recfracs]]]]]
call[name[plt].plot, parameter[name[amplitudebinned_amplitudes], name[amplitudebinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per alias-type recovery fraction by periodic var amplitudes]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b01e6950>, <ast.Constant object at 0x7da1b01e71c0>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-amplitudes-aliastype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], name[ndetbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[periodic variable light curve points]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[overall recovery fraction by periodic var ndet]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b00b0be0>, <ast.Constant object at 0x7da1b00b07c0>]]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-ndet-overall.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
for taget[name[magcol]] in starred[name[magcols]] begin[:]
variable[thismagcol_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b00b0520>, <ast.Name object at 0x7da1b00b0220>]]] in starred[call[name[zip], parameter[name[ndetbinned_periodicvars], name[ndetbinned_recovered_objects]]]] begin[:]
variable[thisbin_thismagcol_recvars] assign[=] <ast.ListComp object at 0x7da1b00b0df0>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thismagcol_recvars]]].size / name[ndetbin_pv].size]
call[name[thismagcol_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], call[name[np].array, parameter[name[thismagcol_recfracs]]]]]
call[name[ndetbinned_per_magcol_recfracs].append, parameter[call[name[np].array, parameter[name[thismagcol_recfracs]]]]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], name[ndetbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per magcol recovery fraction by periodic var ndets]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1b00b26e0>, <ast.Constant object at 0x7da1b00b2740>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-ndet-magcols.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_pfmethods] assign[=] call[name[np].unique, parameter[call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1b00b2b90>]]]]
for taget[name[pfm]] in starred[name[all_pfmethods]] begin[:]
variable[thispf_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b00b31f0>, <ast.Name object at 0x7da1b00b3310>]]] in starred[call[name[zip], parameter[name[ndetbinned_periodicvars], name[ndetbinned_recovered_objects]]]] begin[:]
variable[thisbin_thispf_recvars] assign[=] <ast.ListComp object at 0x7da1b00b1e70>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thispf_recvars]]].size / name[ndetbin_pv].size]
call[name[thispf_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], call[name[np].array, parameter[name[thispf_recfracs]]]]]
call[name[ndetbinned_per_pfmethod_recfracs].append, parameter[call[name[np].array, parameter[name[thispf_recfracs]]]]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], name[ndetbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per period-finder recovery fraction by periodic var ndets]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1aff4a4d0>, <ast.Constant object at 0x7da1aff4a4a0>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-ndet-pfmethod.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_vartypes] assign[=] call[name[np].unique, parameter[<ast.ListComp object at 0x7da1aff486a0>]]
for taget[name[vt]] in starred[name[all_vartypes]] begin[:]
variable[thisvt_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff498d0>, <ast.Name object at 0x7da1aff48040>]]] in starred[call[name[zip], parameter[name[ndetbinned_periodicvars], name[ndetbinned_recovered_objects]]]] begin[:]
variable[thisbin_thisvt_recvars] assign[=] <ast.ListComp object at 0x7da1aff7c160>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thisvt_recvars]]].size / name[ndetbin_pv].size]
call[name[thisvt_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], call[name[np].array, parameter[name[thisvt_recfracs]]]]]
call[name[ndetbinned_per_vartype_recfracs].append, parameter[call[name[np].array, parameter[name[thisvt_recfracs]]]]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], name[ndetbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per vartype recovery fraction by periodic var ndets]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1aff7d360>, <ast.Constant object at 0x7da1aff7d390>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-ndet-vartype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[all_aliastypes] assign[=] name[recovered_status]
for taget[name[at]] in starred[name[all_aliastypes]] begin[:]
variable[thisat_recfracs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff7de10>, <ast.Name object at 0x7da1aff7de40>]]] in starred[call[name[zip], parameter[name[ndetbinned_periodicvars], name[ndetbinned_recovered_objects]]]] begin[:]
variable[thisbin_thisat_recvars] assign[=] <ast.ListComp object at 0x7da1aff7df90>
variable[thisbin_thismagcol_recfrac] assign[=] binary_operation[call[name[np].array, parameter[name[thisbin_thisat_recvars]]].size / name[ndetbin_pv].size]
call[name[thisat_recfracs].append, parameter[name[thisbin_thismagcol_recfrac]]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], call[name[np].array, parameter[name[thisat_recfracs]]]]]
call[name[ndetbinned_per_aliastype_recfracs].append, parameter[call[name[np].array, parameter[name[thisat_recfracs]]]]]
call[name[plt].plot, parameter[name[ndetbinned_ndets], name[ndetbinned_recfrac]]]
call[name[plt].xlabel, parameter[constant[SDSS $r$ magnitude]]]
call[name[plt].ylabel, parameter[constant[recovered fraction of periodic variables]]]
call[name[plt].title, parameter[constant[per alias-type recovery fraction by periodic var ndets]]]
call[name[plt].ylim, parameter[tuple[[<ast.Constant object at 0x7da1aff7f1f0>, <ast.Constant object at 0x7da1aff7f220>]]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-binned-ndet-aliastype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
call[name[outdict]][constant[magbinned_per_magcol_recfracs]] assign[=] name[magbinned_per_magcol_recfracs]
call[name[outdict]][constant[magbinned_per_pfmethod_recfracs]] assign[=] name[magbinned_per_pfmethod_recfracs]
call[name[outdict]][constant[magbinned_per_vartype_recfracs]] assign[=] name[magbinned_per_vartype_recfracs]
call[name[outdict]][constant[magbinned_per_aliastype_recfracs]] assign[=] name[magbinned_per_aliastype_recfracs]
call[name[outdict]][constant[periodbinned_per_magcol_recfracs]] assign[=] name[periodbinned_per_magcol_recfracs]
call[name[outdict]][constant[periodbinned_per_pfmethod_recfracs]] assign[=] name[periodbinned_per_pfmethod_recfracs]
call[name[outdict]][constant[periodbinned_per_vartype_recfracs]] assign[=] name[periodbinned_per_vartype_recfracs]
call[name[outdict]][constant[periodbinned_per_aliastype_recfracs]] assign[=] name[periodbinned_per_aliastype_recfracs]
call[name[outdict]][constant[amplitudebinned_per_magcol_recfracs]] assign[=] name[amplitudebinned_per_magcol_recfracs]
call[name[outdict]][constant[amplitudebinned_per_pfmethod_recfracs]] assign[=] name[amplitudebinned_per_pfmethod_recfracs]
call[name[outdict]][constant[amplitudebinned_per_vartype_recfracs]] assign[=] name[amplitudebinned_per_vartype_recfracs]
call[name[outdict]][constant[amplitudebinned_per_aliastype_recfracs]] assign[=] name[amplitudebinned_per_aliastype_recfracs]
call[name[outdict]][constant[ndetbinned_per_magcol_recfracs]] assign[=] name[ndetbinned_per_magcol_recfracs]
call[name[outdict]][constant[ndetbinned_per_pfmethod_recfracs]] assign[=] name[ndetbinned_per_pfmethod_recfracs]
call[name[outdict]][constant[ndetbinned_per_vartype_recfracs]] assign[=] name[ndetbinned_per_vartype_recfracs]
call[name[outdict]][constant[ndetbinned_per_aliastype_recfracs]] assign[=] name[ndetbinned_per_aliastype_recfracs]
variable[overall_recvars_per_pfmethod] assign[=] list[[]]
for taget[name[pfm]] in starred[name[all_pfmethods]] begin[:]
variable[thispfm_recvars] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1aff78970>]]
call[name[overall_recvars_per_pfmethod].append, parameter[name[thispfm_recvars]]]
variable[overall_recvars_per_vartype] assign[=] list[[]]
for taget[name[vt]] in starred[name[all_vartypes]] begin[:]
variable[thisvt_recvars] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1aff79090>]]
call[name[overall_recvars_per_vartype].append, parameter[name[thisvt_recvars]]]
variable[overall_recvars_per_magcol] assign[=] list[[]]
for taget[name[mc]] in starred[name[magcols]] begin[:]
variable[thismc_recvars] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1aff797b0>]]
call[name[overall_recvars_per_magcol].append, parameter[name[thismc_recvars]]]
variable[overall_recvars_per_aliastype] assign[=] list[[]]
for taget[name[at]] in starred[name[all_aliastypes]] begin[:]
variable[thisat_recvars] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1aff79ed0>]]
call[name[overall_recvars_per_aliastype].append, parameter[name[thisat_recvars]]]
call[name[outdict]][constant[overall_recfrac_per_pfmethod]] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1aff7a530>]]
call[name[outdict]][constant[overall_recfrac_per_vartype]] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1aff7a860>]]
call[name[outdict]][constant[overall_recfrac_per_magcol]] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1aff7ab90>]]
call[name[outdict]][constant[overall_recfrac_per_aliastype]] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1aff7aec0>]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[xt] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[all_pfmethods]]]]]
variable[xl] assign[=] name[all_pfmethods]
call[name[plt].barh, parameter[name[xt], call[name[outdict]][constant[overall_recfrac_per_pfmethod]], constant[0.5]]]
call[name[plt].yticks, parameter[name[xt], name[xl]]]
call[name[plt].xlabel, parameter[constant[period-finding method]]]
call[name[plt].ylabel, parameter[constant[overall recovery rate]]]
call[name[plt].title, parameter[constant[overall recovery rate per period-finding method]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-overall-pfmethod.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[xt] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[magcols]]]]]
variable[xl] assign[=] name[magcols]
call[name[plt].barh, parameter[name[xt], call[name[outdict]][constant[overall_recfrac_per_magcol]], constant[0.5]]]
call[name[plt].yticks, parameter[name[xt], name[xl]]]
call[name[plt].xlabel, parameter[constant[light curve magnitude column]]]
call[name[plt].ylabel, parameter[constant[overall recovery rate]]]
call[name[plt].title, parameter[constant[overall recovery rate per light curve magcol]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-overall-magcol.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[xt] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[all_aliastypes]]]]]
variable[xl] assign[=] name[all_aliastypes]
call[name[plt].barh, parameter[name[xt], call[name[outdict]][constant[overall_recfrac_per_aliastype]], constant[0.5]]]
call[name[plt].yticks, parameter[name[xt], name[xl]]]
call[name[plt].xlabel, parameter[constant[period recovery status]]]
call[name[plt].ylabel, parameter[constant[overall recovery rate]]]
call[name[plt].title, parameter[constant[overall recovery rate per period recovery status]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-overall-aliastype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[xt] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[all_vartypes]]]]]
variable[xl] assign[=] name[all_vartypes]
call[name[plt].barh, parameter[name[xt], call[name[outdict]][constant[overall_recfrac_per_vartype]], constant[0.5]]]
call[name[plt].yticks, parameter[name[xt], name[xl]]]
call[name[plt].xlabel, parameter[constant[periodic variable type]]]
call[name[plt].ylabel, parameter[constant[overall recovery rate]]]
call[name[plt].title, parameter[constant[overall recovery rate per periodic variable type]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recfrac-overall-vartype.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[notvariable_recovered_periods] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1aff96f50>]]
variable[notvariable_recovered_lspvals] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da1aff97490>]]
variable[sortind] assign[=] call[name[np].argsort, parameter[name[notvariable_recovered_periods]]]
variable[notvariable_recovered_periods] assign[=] call[name[notvariable_recovered_periods]][name[sortind]]
variable[notvariable_recovered_lspvals] assign[=] call[name[notvariable_recovered_lspvals]][name[sortind]]
call[name[outdict]][constant[notvariable_recovered_periods]] assign[=] name[notvariable_recovered_periods]
call[name[outdict]][constant[notvariable_recovered_lspvals]] assign[=] name[notvariable_recovered_lspvals]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
call[name[plt].plot, parameter[name[notvariable_recovered_periods], name[notvariable_recovered_lspvals]]]
call[name[plt].xscale, parameter[constant[log]]]
call[name[plt].xlabel, parameter[constant[recovered periods [days]]]]
call[name[plt].ylabel, parameter[constant[recovered normalized periodogram power]]]
call[name[plt].title, parameter[constant[periodogram for actual not-variable objects]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recovered-periodogram-nonvariables.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
call[name[plt].hist, parameter[name[notvariable_recovered_periods]]]
call[name[plt].xscale, parameter[constant[log]]]
call[name[plt].xlabel, parameter[constant[recovered periods [days]]]]
call[name[plt].ylabel, parameter[constant[number of times periods recovered]]]
call[name[plt].title, parameter[constant[recovered period histogram for non-variable objects]]]
call[name[plt].savefig, parameter[call[name[os].path.join, parameter[name[recplotdir], binary_operation[constant[recovered-period-hist-nonvariables.%s] <ast.Mod object at 0x7da2590d6920> name[plotfile_ext]]]]]]
call[name[plt].close, parameter[constant[all]]]
variable[outfile] assign[=] call[name[os].path.join, parameter[name[simbasedir], constant[periodicvar-recovery-plotresults.pkl]]]
with call[name[open], parameter[name[outfile], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[outdict], name[outfd], name[pickle].HIGHEST_PROTOCOL]]
return[name[outdict]] | keyword[def] identifier[plot_periodicvar_recovery_results] (
identifier[precvar_results] ,
identifier[aliases_count_as_recovered] = keyword[None] ,
identifier[magbins] = keyword[None] ,
identifier[periodbins] = keyword[None] ,
identifier[amplitudebins] = keyword[None] ,
identifier[ndetbins] = keyword[None] ,
identifier[minbinsize] = literal[int] ,
identifier[plotfile_ext] = literal[string] ,
):
literal[string]
keyword[if] identifier[isinstance] ( identifier[precvar_results] , identifier[str] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[precvar_results] ):
keyword[with] identifier[open] ( identifier[precvar_results] , literal[string] ) keyword[as] identifier[infd] :
identifier[precvar] = identifier[pickle] . identifier[load] ( identifier[infd] )
keyword[elif] identifier[isinstance] ( identifier[precvar_results] , identifier[dict] ):
identifier[precvar] = identifier[precvar_results]
keyword[else] :
identifier[LOGERROR] ( literal[string]
literal[string] )
keyword[return] keyword[None]
identifier[simbasedir] = identifier[precvar] [ literal[string] ]
identifier[lcinfof] = identifier[os] . identifier[path] . identifier[join] ( identifier[simbasedir] , literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[lcinfof] ):
identifier[LOGERROR] ( literal[string] %
identifier[simbasedir] )
keyword[return] keyword[None]
keyword[with] identifier[open] ( identifier[lcinfof] , literal[string] ) keyword[as] identifier[infd] :
identifier[lcinfo] = identifier[pickle] . identifier[load] ( identifier[infd] )
identifier[magcols] = identifier[lcinfo] [ literal[string] ]
identifier[objectid] = identifier[lcinfo] [ literal[string] ]
identifier[ndet] = identifier[lcinfo] [ literal[string] ]
identifier[sdssr] = identifier[lcinfo] [ literal[string] ]
identifier[actual_periodicvars] = identifier[precvar] [ literal[string] ]
identifier[LOGINFO] ( literal[string] )
identifier[periodicvar_sdssr] =[]
identifier[periodicvar_ndet] =[]
identifier[periodicvar_objectids] =[]
keyword[for] identifier[pobj] keyword[in] identifier[actual_periodicvars] :
identifier[pobjind] = identifier[objectid] == identifier[pobj]
identifier[periodicvar_objectids] . identifier[append] ( identifier[pobj] )
identifier[periodicvar_sdssr] . identifier[append] ( identifier[sdssr] [ identifier[pobjind] ])
identifier[periodicvar_ndet] . identifier[append] ( identifier[ndet] [ identifier[pobjind] ])
identifier[periodicvar_sdssr] = identifier[np] . identifier[array] ( identifier[periodicvar_sdssr] )
identifier[periodicvar_objectids] = identifier[np] . identifier[array] ( identifier[periodicvar_objectids] )
identifier[periodicvar_ndet] = identifier[np] . identifier[array] ( identifier[periodicvar_ndet] )
identifier[LOGINFO] ( literal[string]
literal[string] )
identifier[periodicvar_periods] =[
identifier[np] . identifier[asscalar] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[periodicvar_objectids]
]
identifier[periodicvar_amplitudes] =[
identifier[np] . identifier[asscalar] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[periodicvar_objectids]
]
identifier[periodicvar_vartypes] =[
identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[periodicvar_objectids]
]
identifier[LOGINFO] ( literal[string] )
identifier[magbinned_sdssr] =[]
identifier[magbinned_periodicvars] =[]
keyword[if] keyword[not] identifier[magbins] :
identifier[magbins] = identifier[PERIODREC_DEFAULT_MAGBINS]
identifier[magbininds] = identifier[np] . identifier[digitize] ( identifier[np] . identifier[ravel] ( identifier[periodicvar_sdssr] ), identifier[magbins] )
keyword[for] identifier[mbinind] , identifier[magi] keyword[in] identifier[zip] ( identifier[np] . identifier[unique] ( identifier[magbininds] ),
identifier[range] ( identifier[len] ( identifier[magbins] )- literal[int] )):
identifier[thisbin_periodicvars] = identifier[periodicvar_objectids] [ identifier[magbininds] == identifier[mbinind] ]
keyword[if] ( identifier[thisbin_periodicvars] . identifier[size] >( identifier[minbinsize] - literal[int] )):
identifier[magbinned_sdssr] . identifier[append] (( identifier[magbins] [ identifier[magi] ]+ identifier[magbins] [ identifier[magi] + literal[int] ])/ literal[int] )
identifier[magbinned_periodicvars] . identifier[append] ( identifier[thisbin_periodicvars] )
identifier[LOGINFO] ( literal[string] )
identifier[periodbinned_periods] =[]
identifier[periodbinned_periodicvars] =[]
keyword[if] keyword[not] identifier[periodbins] :
identifier[periodbins] = identifier[PERIODREC_DEFAULT_PERIODBINS]
identifier[periodbininds] = identifier[np] . identifier[digitize] ( identifier[np] . identifier[ravel] ( identifier[periodicvar_periods] ), identifier[periodbins] )
keyword[for] identifier[pbinind] , identifier[peri] keyword[in] identifier[zip] ( identifier[np] . identifier[unique] ( identifier[periodbininds] ),
identifier[range] ( identifier[len] ( identifier[periodbins] )- literal[int] )):
identifier[thisbin_periodicvars] = identifier[periodicvar_objectids] [ identifier[periodbininds] == identifier[pbinind] ]
keyword[if] ( identifier[thisbin_periodicvars] . identifier[size] >( identifier[minbinsize] - literal[int] )):
identifier[periodbinned_periods] . identifier[append] (( identifier[periodbins] [ identifier[peri] ]+
identifier[periodbins] [ identifier[peri] + literal[int] ])/ literal[int] )
identifier[periodbinned_periodicvars] . identifier[append] ( identifier[thisbin_periodicvars] )
identifier[LOGINFO] ( literal[string] )
identifier[amplitudebinned_amplitudes] =[]
identifier[amplitudebinned_periodicvars] =[]
keyword[if] keyword[not] identifier[amplitudebins] :
identifier[amplitudebins] = identifier[PERIODREC_DEFAULT_AMPBINS]
identifier[amplitudebininds] = identifier[np] . identifier[digitize] ( identifier[np] . identifier[ravel] ( identifier[np] . identifier[abs] ( identifier[periodicvar_amplitudes] )),
identifier[amplitudebins] )
keyword[for] identifier[abinind] , identifier[ampi] keyword[in] identifier[zip] ( identifier[np] . identifier[unique] ( identifier[amplitudebininds] ),
identifier[range] ( identifier[len] ( identifier[amplitudebins] )- literal[int] )):
identifier[thisbin_periodicvars] = identifier[periodicvar_objectids] [
identifier[amplitudebininds] == identifier[abinind]
]
keyword[if] ( identifier[thisbin_periodicvars] . identifier[size] >( identifier[minbinsize] - literal[int] )):
identifier[amplitudebinned_amplitudes] . identifier[append] (
( identifier[amplitudebins] [ identifier[ampi] ]+
identifier[amplitudebins] [ identifier[ampi] + literal[int] ])/ literal[int]
)
identifier[amplitudebinned_periodicvars] . identifier[append] ( identifier[thisbin_periodicvars] )
identifier[LOGINFO] ( literal[string] )
identifier[ndetbinned_ndets] =[]
identifier[ndetbinned_periodicvars] =[]
keyword[if] keyword[not] identifier[ndetbins] :
identifier[ndetbins] = identifier[PERIODREC_DEFAULT_NDETBINS]
identifier[ndetbininds] = identifier[np] . identifier[digitize] ( identifier[np] . identifier[ravel] ( identifier[periodicvar_ndet] ), identifier[ndetbins] )
keyword[for] identifier[nbinind] , identifier[ndeti] keyword[in] identifier[zip] ( identifier[np] . identifier[unique] ( identifier[ndetbininds] ),
identifier[range] ( identifier[len] ( identifier[ndetbins] )- literal[int] )):
identifier[thisbin_periodicvars] = identifier[periodicvar_objectids] [ identifier[ndetbininds] == identifier[nbinind] ]
keyword[if] ( identifier[thisbin_periodicvars] . identifier[size] >( identifier[minbinsize] - literal[int] )):
identifier[ndetbinned_ndets] . identifier[append] (
( identifier[ndetbins] [ identifier[ndeti] ]+
identifier[ndetbins] [ identifier[ndeti] + literal[int] ])/ literal[int]
)
identifier[ndetbinned_periodicvars] . identifier[append] ( identifier[thisbin_periodicvars] )
identifier[recovered_status] =[ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[aliases_count_as_recovered] , identifier[list] ):
keyword[for] identifier[atype] keyword[in] identifier[aliases_count_as_recovered] :
keyword[if] identifier[atype] keyword[in] identifier[ALIAS_TYPES] :
identifier[recovered_status] . identifier[append] ( identifier[atype] )
keyword[else] :
identifier[LOGWARNING] ( literal[string] % identifier[atype] )
keyword[elif] identifier[aliases_count_as_recovered] keyword[and] identifier[aliases_count_as_recovered] == literal[string] :
keyword[for] identifier[atype] keyword[in] identifier[ALIAS_TYPES] [ literal[int] :]:
identifier[recovered_status] . identifier[append] ( identifier[atype] )
identifier[recovered_periodicvars] = identifier[np] . identifier[array] (
[ identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]
keyword[in] identifier[recovered_status] )],
identifier[dtype] = identifier[np] . identifier[unicode_]
)
identifier[LOGINFO] ( literal[string]
literal[string] %
( identifier[recovered_periodicvars] . identifier[size] ,
identifier[actual_periodicvars] . identifier[size] ,
identifier[float] ( identifier[recovered_periodicvars] . identifier[size] / identifier[actual_periodicvars] . identifier[size] ),
literal[string] . identifier[join] ( identifier[recovered_status] )))
identifier[magbinned_recovered_objects] =[
identifier[np] . identifier[intersect1d] ( identifier[x] , identifier[recovered_periodicvars] )
keyword[for] identifier[x] keyword[in] identifier[magbinned_periodicvars]
]
identifier[magbinned_recfrac] = identifier[np] . identifier[array] ([ identifier[float] ( identifier[x] . identifier[size] / identifier[y] . identifier[size] ) keyword[for] identifier[x] , identifier[y]
keyword[in] identifier[zip] ( identifier[magbinned_recovered_objects] ,
identifier[magbinned_periodicvars] )])
identifier[periodbinned_recovered_objects] =[
identifier[np] . identifier[intersect1d] ( identifier[x] , identifier[recovered_periodicvars] )
keyword[for] identifier[x] keyword[in] identifier[periodbinned_periodicvars]
]
identifier[periodbinned_recfrac] = identifier[np] . identifier[array] ([ identifier[float] ( identifier[x] . identifier[size] / identifier[y] . identifier[size] ) keyword[for] identifier[x] , identifier[y]
keyword[in] identifier[zip] ( identifier[periodbinned_recovered_objects] ,
identifier[periodbinned_periodicvars] )])
identifier[amplitudebinned_recovered_objects] =[
identifier[np] . identifier[intersect1d] ( identifier[x] , identifier[recovered_periodicvars] )
keyword[for] identifier[x] keyword[in] identifier[amplitudebinned_periodicvars]
]
identifier[amplitudebinned_recfrac] = identifier[np] . identifier[array] (
[ identifier[float] ( identifier[x] . identifier[size] / identifier[y] . identifier[size] ) keyword[for] identifier[x] , identifier[y]
keyword[in] identifier[zip] ( identifier[amplitudebinned_recovered_objects] ,
identifier[amplitudebinned_periodicvars] )]
)
identifier[ndetbinned_recovered_objects] =[
identifier[np] . identifier[intersect1d] ( identifier[x] , identifier[recovered_periodicvars] )
keyword[for] identifier[x] keyword[in] identifier[ndetbinned_periodicvars]
]
identifier[ndetbinned_recfrac] = identifier[np] . identifier[array] ([ identifier[float] ( identifier[x] . identifier[size] / identifier[y] . identifier[size] ) keyword[for] identifier[x] , identifier[y]
keyword[in] identifier[zip] ( identifier[ndetbinned_recovered_objects] ,
identifier[ndetbinned_periodicvars] )])
identifier[magbinned_sdssr] = identifier[np] . identifier[array] ( identifier[magbinned_sdssr] )
identifier[periodbinned_periods] = identifier[np] . identifier[array] ( identifier[periodbinned_periods] )
identifier[amplitudebinned_amplitudes] = identifier[np] . identifier[array] ( identifier[amplitudebinned_amplitudes] )
identifier[ndetbinned_ndets] = identifier[np] . identifier[array] ( identifier[ndetbinned_ndets] )
identifier[outdict] ={
literal[string] : identifier[simbasedir] ,
literal[string] : identifier[precvar] ,
literal[string] : identifier[magcols] ,
literal[string] : identifier[objectid] ,
literal[string] : identifier[ndet] ,
literal[string] : identifier[sdssr] ,
literal[string] : identifier[actual_periodicvars] ,
literal[string] : identifier[recovered_periodicvars] ,
literal[string] : identifier[recovered_status] ,
literal[string] : identifier[magbins] ,
literal[string] : identifier[magbinned_sdssr] ,
literal[string] : identifier[magbinned_periodicvars] ,
literal[string] : identifier[magbinned_recovered_objects] ,
literal[string] : identifier[magbinned_recfrac] ,
literal[string] : identifier[periodbins] ,
literal[string] : identifier[periodbinned_periods] ,
literal[string] : identifier[periodbinned_periodicvars] ,
literal[string] : identifier[periodbinned_recovered_objects] ,
literal[string] : identifier[periodbinned_recfrac] ,
literal[string] : identifier[amplitudebins] ,
literal[string] : identifier[amplitudebinned_amplitudes] ,
literal[string] : identifier[amplitudebinned_periodicvars] ,
literal[string] : identifier[amplitudebinned_recovered_objects] ,
literal[string] : identifier[amplitudebinned_recfrac] ,
literal[string] : identifier[ndetbins] ,
literal[string] : identifier[ndetbinned_ndets] ,
literal[string] : identifier[ndetbinned_periodicvars] ,
literal[string] : identifier[ndetbinned_recovered_objects] ,
literal[string] : identifier[ndetbinned_recfrac] ,
}
identifier[all_pfmethods] = identifier[np] . identifier[unique] (
identifier[np] . identifier[concatenate] (
[ identifier[np] . identifier[unique] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ]]
)
)
identifier[all_vartypes] = identifier[np] . identifier[unique] (
[( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[is] keyword[not] keyword[None] )]
)
identifier[all_aliastypes] = identifier[recovered_status]
identifier[outdict] [ literal[string] ]= identifier[all_aliastypes]
identifier[outdict] [ literal[string] ]= identifier[all_pfmethods]
identifier[outdict] [ literal[string] ]= identifier[all_vartypes]
identifier[magbinned_per_magcol_recfracs] =[]
identifier[magbinned_per_vartype_recfracs] =[]
identifier[magbinned_per_pfmethod_recfracs] =[]
identifier[magbinned_per_aliastype_recfracs] =[]
identifier[periodbinned_per_magcol_recfracs] =[]
identifier[periodbinned_per_vartype_recfracs] =[]
identifier[periodbinned_per_pfmethod_recfracs] =[]
identifier[periodbinned_per_aliastype_recfracs] =[]
identifier[amplitudebinned_per_magcol_recfracs] =[]
identifier[amplitudebinned_per_vartype_recfracs] =[]
identifier[amplitudebinned_per_pfmethod_recfracs] =[]
identifier[amplitudebinned_per_aliastype_recfracs] =[]
identifier[ndetbinned_per_magcol_recfracs] =[]
identifier[ndetbinned_per_vartype_recfracs] =[]
identifier[ndetbinned_per_pfmethod_recfracs] =[]
identifier[ndetbinned_per_aliastype_recfracs] =[]
identifier[recplotdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[simbasedir] , literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[recplotdir] ):
identifier[os] . identifier[mkdir] ( identifier[recplotdir] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] , identifier[magbinned_recfrac] , identifier[marker] = literal[string] , identifier[ms] = literal[int] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
keyword[for] identifier[magcol] keyword[in] identifier[magcols] :
identifier[thismagcol_recfracs] =[]
keyword[for] identifier[magbin_pv] , identifier[magbin_rv] keyword[in] identifier[zip] ( identifier[magbinned_periodicvars] ,
identifier[magbinned_recovered_objects] ):
identifier[thisbin_thismagcol_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[magbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[magcol] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thismagcol_recvars] ). identifier[size] /
identifier[magbin_pv] . identifier[size]
)
identifier[thismagcol_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] ,
identifier[np] . identifier[array] ( identifier[thismagcol_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[magcol] ,
identifier[ms] = literal[int] )
identifier[magbinned_per_magcol_recfracs] . identifier[append] ( identifier[np] . identifier[array] ( identifier[thismagcol_recfracs] ))
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] , identifier[magbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_pfmethods] = identifier[np] . identifier[unique] (
identifier[np] . identifier[concatenate] (
[ identifier[np] . identifier[unique] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ]]
)
)
keyword[for] identifier[pfm] keyword[in] identifier[all_pfmethods] :
identifier[thispf_recfracs] =[]
keyword[for] identifier[magbin_pv] , identifier[magbin_rv] keyword[in] identifier[zip] ( identifier[magbinned_periodicvars] ,
identifier[magbinned_recovered_objects] ):
identifier[thisbin_thispf_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[magbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[pfm] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thispf_recvars] ). identifier[size] /
identifier[magbin_pv] . identifier[size]
)
identifier[thispf_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] ,
identifier[np] . identifier[array] ( identifier[thispf_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[pfm] . identifier[upper] (),
identifier[ms] = literal[int] )
identifier[magbinned_per_pfmethod_recfracs] . identifier[append] ( identifier[np] . identifier[array] ( identifier[thispf_recfracs] ))
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] , identifier[magbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_vartypes] = identifier[np] . identifier[unique] (
[( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[is] keyword[not] keyword[None] )]
)
keyword[for] identifier[vt] keyword[in] identifier[all_vartypes] :
identifier[thisvt_recfracs] =[]
keyword[for] identifier[magbin_pv] , identifier[magbin_rv] keyword[in] identifier[zip] ( identifier[magbinned_periodicvars] ,
identifier[magbinned_recovered_objects] ):
identifier[thisbin_thisvt_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[magbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[vt] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thisvt_recvars] ). identifier[size] /
identifier[magbin_pv] . identifier[size]
)
identifier[thisvt_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] ,
identifier[np] . identifier[array] ( identifier[thisvt_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[vt] ,
identifier[ms] = literal[int] )
identifier[magbinned_per_vartype_recfracs] . identifier[append] ( identifier[np] . identifier[array] ( identifier[thisvt_recfracs] ))
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] , identifier[magbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_aliastypes] = identifier[recovered_status]
keyword[for] identifier[at] keyword[in] identifier[all_aliastypes] :
identifier[thisat_recfracs] =[]
keyword[for] identifier[magbin_pv] , identifier[magbin_rv] keyword[in] identifier[zip] ( identifier[magbinned_periodicvars] ,
identifier[magbinned_recovered_objects] ):
identifier[thisbin_thisat_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[magbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ][ literal[int] ]== identifier[at] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thisat_recvars] ). identifier[size] /
identifier[magbin_pv] . identifier[size]
)
identifier[thisat_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] ,
identifier[np] . identifier[array] ( identifier[thisat_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[at] ,
identifier[ms] = literal[int] )
identifier[magbinned_per_aliastype_recfracs] . identifier[append] ( identifier[np] . identifier[array] ( identifier[thisat_recfracs] ))
identifier[plt] . identifier[plot] ( identifier[magbinned_sdssr] , identifier[magbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] , identifier[periodbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
keyword[for] identifier[magcol] keyword[in] identifier[magcols] :
identifier[thismagcol_recfracs] =[]
keyword[for] identifier[periodbin_pv] , identifier[periodbin_rv] keyword[in] identifier[zip] ( identifier[periodbinned_periodicvars] ,
identifier[periodbinned_recovered_objects] ):
identifier[thisbin_thismagcol_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[periodbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[magcol] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thismagcol_recvars] ). identifier[size] /
identifier[periodbin_pv] . identifier[size]
)
identifier[thismagcol_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] ,
identifier[np] . identifier[array] ( identifier[thismagcol_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[magcol] ,
identifier[ms] = literal[int] )
identifier[periodbinned_per_magcol_recfracs] . identifier[append] ( identifier[np] . identifier[array] ( identifier[thismagcol_recfracs] ))
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] , identifier[periodbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_pfmethods] = identifier[np] . identifier[unique] (
identifier[np] . identifier[concatenate] (
[ identifier[np] . identifier[unique] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ]]
)
)
keyword[for] identifier[pfm] keyword[in] identifier[all_pfmethods] :
identifier[thispf_recfracs] =[]
keyword[for] identifier[periodbin_pv] , identifier[periodbin_rv] keyword[in] identifier[zip] ( identifier[periodbinned_periodicvars] ,
identifier[periodbinned_recovered_objects] ):
identifier[thisbin_thispf_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[periodbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[pfm] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thispf_recvars] ). identifier[size] /
identifier[periodbin_pv] . identifier[size]
)
identifier[thispf_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] ,
identifier[np] . identifier[array] ( identifier[thispf_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[pfm] . identifier[upper] (),
identifier[ms] = literal[int] )
identifier[periodbinned_per_pfmethod_recfracs] . identifier[append] ( identifier[np] . identifier[array] ( identifier[thispf_recfracs] ))
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] , identifier[periodbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_vartypes] = identifier[np] . identifier[unique] (
[( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[is] keyword[not] keyword[None] )]
)
keyword[for] identifier[vt] keyword[in] identifier[all_vartypes] :
identifier[thisvt_recfracs] =[]
keyword[for] identifier[periodbin_pv] , identifier[periodbin_rv] keyword[in] identifier[zip] ( identifier[periodbinned_periodicvars] ,
identifier[periodbinned_recovered_objects] ):
identifier[thisbin_thisvt_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[periodbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[vt] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thisvt_recvars] ). identifier[size] /
identifier[periodbin_pv] . identifier[size]
)
identifier[thisvt_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] ,
identifier[np] . identifier[array] ( identifier[thisvt_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[vt] ,
identifier[ms] = literal[int] )
identifier[periodbinned_per_vartype_recfracs] . identifier[append] ( identifier[np] . identifier[array] ( identifier[thisvt_recfracs] ))
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] , identifier[periodbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_aliastypes] = identifier[recovered_status]
keyword[for] identifier[at] keyword[in] identifier[all_aliastypes] :
identifier[thisat_recfracs] =[]
keyword[for] identifier[periodbin_pv] , identifier[periodbin_rv] keyword[in] identifier[zip] (
identifier[periodbinned_periodicvars] ,
identifier[periodbinned_recovered_objects]
):
identifier[thisbin_thisat_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[periodbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ][ literal[int] ]== identifier[at] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thisat_recvars] ). identifier[size] /
identifier[periodbin_pv] . identifier[size]
)
identifier[thisat_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] ,
identifier[np] . identifier[array] ( identifier[thisat_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[at] ,
identifier[ms] = literal[int] )
identifier[periodbinned_per_aliastype_recfracs] . identifier[append] ( identifier[np] . identifier[array] ( identifier[thisat_recfracs] ))
identifier[plt] . identifier[plot] ( identifier[periodbinned_periods] , identifier[periodbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] , identifier[amplitudebinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
keyword[for] identifier[magcol] keyword[in] identifier[magcols] :
identifier[thismagcol_recfracs] =[]
keyword[for] identifier[amplitudebin_pv] , identifier[amplitudebin_rv] keyword[in] identifier[zip] (
identifier[amplitudebinned_periodicvars] ,
identifier[amplitudebinned_recovered_objects]
):
identifier[thisbin_thismagcol_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[amplitudebin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[magcol] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thismagcol_recvars] ). identifier[size] /
identifier[amplitudebin_pv] . identifier[size]
)
identifier[thismagcol_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] ,
identifier[np] . identifier[array] ( identifier[thismagcol_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[magcol] ,
identifier[ms] = literal[int] )
identifier[amplitudebinned_per_magcol_recfracs] . identifier[append] (
identifier[np] . identifier[array] ( identifier[thismagcol_recfracs] )
)
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] , identifier[amplitudebinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_pfmethods] = identifier[np] . identifier[unique] (
identifier[np] . identifier[concatenate] (
[ identifier[np] . identifier[unique] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ]]
)
)
keyword[for] identifier[pfm] keyword[in] identifier[all_pfmethods] :
identifier[thispf_recfracs] =[]
keyword[for] identifier[amplitudebin_pv] , identifier[amplitudebin_rv] keyword[in] identifier[zip] (
identifier[amplitudebinned_periodicvars] ,
identifier[amplitudebinned_recovered_objects]
):
identifier[thisbin_thispf_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[amplitudebin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[pfm] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thispf_recvars] ). identifier[size] /
identifier[amplitudebin_pv] . identifier[size]
)
identifier[thispf_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] ,
identifier[np] . identifier[array] ( identifier[thispf_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[pfm] . identifier[upper] (),
identifier[ms] = literal[int] )
identifier[amplitudebinned_per_pfmethod_recfracs] . identifier[append] (
identifier[np] . identifier[array] ( identifier[thispf_recfracs] )
)
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] , identifier[amplitudebinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_vartypes] = identifier[np] . identifier[unique] (
[( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[is] keyword[not] keyword[None] )]
)
keyword[for] identifier[vt] keyword[in] identifier[all_vartypes] :
identifier[thisvt_recfracs] =[]
keyword[for] identifier[amplitudebin_pv] , identifier[amplitudebin_rv] keyword[in] identifier[zip] (
identifier[amplitudebinned_periodicvars] ,
identifier[amplitudebinned_recovered_objects]
):
identifier[thisbin_thisvt_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[amplitudebin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[vt] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thisvt_recvars] ). identifier[size] /
identifier[amplitudebin_pv] . identifier[size]
)
identifier[thisvt_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] ,
identifier[np] . identifier[array] ( identifier[thisvt_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[vt] ,
identifier[ms] = literal[int] )
identifier[amplitudebinned_per_vartype_recfracs] . identifier[append] (
identifier[np] . identifier[array] ( identifier[thisvt_recfracs] )
)
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] , identifier[amplitudebinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_aliastypes] = identifier[recovered_status]
keyword[for] identifier[at] keyword[in] identifier[all_aliastypes] :
identifier[thisat_recfracs] =[]
keyword[for] identifier[amplitudebin_pv] , identifier[amplitudebin_rv] keyword[in] identifier[zip] (
identifier[amplitudebinned_periodicvars] ,
identifier[amplitudebinned_recovered_objects]
):
identifier[thisbin_thisat_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[amplitudebin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ][ literal[int] ]== identifier[at] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thisat_recvars] ). identifier[size] /
identifier[amplitudebin_pv] . identifier[size]
)
identifier[thisat_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] ,
identifier[np] . identifier[array] ( identifier[thisat_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[at] ,
identifier[ms] = literal[int] )
identifier[amplitudebinned_per_aliastype_recfracs] . identifier[append] (
identifier[np] . identifier[array] ( identifier[thisat_recfracs] )
)
identifier[plt] . identifier[plot] ( identifier[amplitudebinned_amplitudes] , identifier[amplitudebinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] , identifier[ndetbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
keyword[for] identifier[magcol] keyword[in] identifier[magcols] :
identifier[thismagcol_recfracs] =[]
keyword[for] identifier[ndetbin_pv] , identifier[ndetbin_rv] keyword[in] identifier[zip] ( identifier[ndetbinned_periodicvars] ,
identifier[ndetbinned_recovered_objects] ):
identifier[thisbin_thismagcol_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[ndetbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[magcol] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thismagcol_recvars] ). identifier[size] /
identifier[ndetbin_pv] . identifier[size]
)
identifier[thismagcol_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] ,
identifier[np] . identifier[array] ( identifier[thismagcol_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[magcol] ,
identifier[ms] = literal[int] )
identifier[ndetbinned_per_magcol_recfracs] . identifier[append] (
identifier[np] . identifier[array] ( identifier[thismagcol_recfracs] )
)
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] , identifier[ndetbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_pfmethods] = identifier[np] . identifier[unique] (
identifier[np] . identifier[concatenate] (
[ identifier[np] . identifier[unique] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ]]
)
)
keyword[for] identifier[pfm] keyword[in] identifier[all_pfmethods] :
identifier[thispf_recfracs] =[]
keyword[for] identifier[ndetbin_pv] , identifier[ndetbin_rv] keyword[in] identifier[zip] ( identifier[ndetbinned_periodicvars] ,
identifier[ndetbinned_recovered_objects] ):
identifier[thisbin_thispf_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[ndetbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[pfm] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thispf_recvars] ). identifier[size] /
identifier[ndetbin_pv] . identifier[size]
)
identifier[thispf_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] ,
identifier[np] . identifier[array] ( identifier[thispf_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[pfm] . identifier[upper] (),
identifier[ms] = literal[int] )
identifier[ndetbinned_per_pfmethod_recfracs] . identifier[append] (
identifier[np] . identifier[array] ( identifier[thispf_recfracs] )
)
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] , identifier[ndetbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_vartypes] = identifier[np] . identifier[unique] (
[( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[in] identifier[PERIODIC_VARTYPES] )]
)
keyword[for] identifier[vt] keyword[in] identifier[all_vartypes] :
identifier[thisvt_recfracs] =[]
keyword[for] identifier[ndetbin_pv] , identifier[ndetbin_rv] keyword[in] identifier[zip] ( identifier[ndetbinned_periodicvars] ,
identifier[ndetbinned_recovered_objects] ):
identifier[thisbin_thisvt_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[ndetbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[vt] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thisvt_recvars] ). identifier[size] /
identifier[ndetbin_pv] . identifier[size]
)
identifier[thisvt_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] ,
identifier[np] . identifier[array] ( identifier[thisvt_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[vt] ,
identifier[ms] = literal[int] )
identifier[ndetbinned_per_vartype_recfracs] . identifier[append] (
identifier[np] . identifier[array] ( identifier[thisvt_recfracs] )
)
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] , identifier[ndetbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[all_aliastypes] = identifier[recovered_status]
keyword[for] identifier[at] keyword[in] identifier[all_aliastypes] :
identifier[thisat_recfracs] =[]
keyword[for] identifier[ndetbin_pv] , identifier[ndetbin_rv] keyword[in] identifier[zip] ( identifier[ndetbinned_periodicvars] ,
identifier[ndetbinned_recovered_objects] ):
identifier[thisbin_thisat_recvars] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[ndetbin_rv]
keyword[if] ( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ][ literal[int] ]== identifier[at] )
]
identifier[thisbin_thismagcol_recfrac] =(
identifier[np] . identifier[array] ( identifier[thisbin_thisat_recvars] ). identifier[size] /
identifier[ndetbin_pv] . identifier[size]
)
identifier[thisat_recfracs] . identifier[append] ( identifier[thisbin_thismagcol_recfrac] )
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] ,
identifier[np] . identifier[array] ( identifier[thisat_recfracs] ),
identifier[marker] = literal[string] ,
identifier[label] = literal[string] % identifier[at] ,
identifier[ms] = literal[int] )
identifier[ndetbinned_per_aliastype_recfracs] . identifier[append] (
identifier[np] . identifier[array] ( identifier[thisat_recfracs] )
)
identifier[plt] . identifier[plot] ( identifier[ndetbinned_ndets] , identifier[ndetbinned_recfrac] ,
identifier[marker] = literal[string] , identifier[ms] = literal[int] , identifier[label] = literal[string] , identifier[color] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[ylim] (( literal[int] , literal[int] ))
identifier[plt] . identifier[legend] ( identifier[markerscale] = literal[int] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[outdict] [ literal[string] ]=(
identifier[magbinned_per_magcol_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[magbinned_per_pfmethod_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[magbinned_per_vartype_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[magbinned_per_aliastype_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[periodbinned_per_magcol_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[periodbinned_per_pfmethod_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[periodbinned_per_vartype_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[periodbinned_per_aliastype_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[amplitudebinned_per_magcol_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[amplitudebinned_per_pfmethod_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[amplitudebinned_per_vartype_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[amplitudebinned_per_aliastype_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[ndetbinned_per_magcol_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[ndetbinned_per_pfmethod_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[ndetbinned_per_vartype_recfracs]
)
identifier[outdict] [ literal[string] ]=(
identifier[ndetbinned_per_aliastype_recfracs]
)
identifier[overall_recvars_per_pfmethod] =[]
keyword[for] identifier[pfm] keyword[in] identifier[all_pfmethods] :
identifier[thispfm_recvars] = identifier[np] . identifier[array] ([
identifier[x] keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
(( identifier[x] keyword[in] identifier[recovered_periodicvars] ) keyword[and]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[pfm] ))
])
identifier[overall_recvars_per_pfmethod] . identifier[append] ( identifier[thispfm_recvars] )
identifier[overall_recvars_per_vartype] =[]
keyword[for] identifier[vt] keyword[in] identifier[all_vartypes] :
identifier[thisvt_recvars] = identifier[np] . identifier[array] ([
identifier[x] keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
(( identifier[x] keyword[in] identifier[recovered_periodicvars] ) keyword[and]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[vt] ))
])
identifier[overall_recvars_per_vartype] . identifier[append] ( identifier[thisvt_recvars] )
identifier[overall_recvars_per_magcol] =[]
keyword[for] identifier[mc] keyword[in] identifier[magcols] :
identifier[thismc_recvars] = identifier[np] . identifier[array] ([
identifier[x] keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
(( identifier[x] keyword[in] identifier[recovered_periodicvars] ) keyword[and]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[mc] ))
])
identifier[overall_recvars_per_magcol] . identifier[append] ( identifier[thismc_recvars] )
identifier[overall_recvars_per_aliastype] =[]
keyword[for] identifier[at] keyword[in] identifier[all_aliastypes] :
identifier[thisat_recvars] = identifier[np] . identifier[array] ([
identifier[x] keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
(( identifier[x] keyword[in] identifier[recovered_periodicvars] ) keyword[and]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]== identifier[at] ))
])
identifier[overall_recvars_per_aliastype] . identifier[append] ( identifier[thisat_recvars] )
identifier[outdict] [ literal[string] ]= identifier[np] . identifier[array] ([
identifier[x] . identifier[size] / identifier[actual_periodicvars] . identifier[size] keyword[for] identifier[x] keyword[in] identifier[overall_recvars_per_pfmethod]
])
identifier[outdict] [ literal[string] ]= identifier[np] . identifier[array] ([
identifier[x] . identifier[size] / identifier[actual_periodicvars] . identifier[size] keyword[for] identifier[x] keyword[in] identifier[overall_recvars_per_vartype]
])
identifier[outdict] [ literal[string] ]= identifier[np] . identifier[array] ([
identifier[x] . identifier[size] / identifier[actual_periodicvars] . identifier[size] keyword[for] identifier[x] keyword[in] identifier[overall_recvars_per_magcol]
])
identifier[outdict] [ literal[string] ]= identifier[np] . identifier[array] ([
identifier[x] . identifier[size] / identifier[actual_periodicvars] . identifier[size] keyword[for] identifier[x] keyword[in] identifier[overall_recvars_per_aliastype]
])
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[xt] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[all_pfmethods] ))
identifier[xl] = identifier[all_pfmethods]
identifier[plt] . identifier[barh] ( identifier[xt] , identifier[outdict] [ literal[string] ], literal[int] )
identifier[plt] . identifier[yticks] ( identifier[xt] , identifier[xl] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[xt] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[magcols] ))
identifier[xl] = identifier[magcols]
identifier[plt] . identifier[barh] ( identifier[xt] , identifier[outdict] [ literal[string] ], literal[int] )
identifier[plt] . identifier[yticks] ( identifier[xt] , identifier[xl] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[xt] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[all_aliastypes] ))
identifier[xl] = identifier[all_aliastypes]
identifier[plt] . identifier[barh] ( identifier[xt] , identifier[outdict] [ literal[string] ], literal[int] )
identifier[plt] . identifier[yticks] ( identifier[xt] , identifier[xl] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[xt] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[all_vartypes] ))
identifier[xl] = identifier[all_vartypes]
identifier[plt] . identifier[barh] ( identifier[xt] , identifier[outdict] [ literal[string] ], literal[int] )
identifier[plt] . identifier[yticks] ( identifier[xt] , identifier[xl] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[notvariable_recovered_periods] = identifier[np] . identifier[concatenate] ([
identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[is] keyword[None] )
])
identifier[notvariable_recovered_lspvals] = identifier[np] . identifier[concatenate] ([
identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ]
keyword[for] identifier[x] keyword[in] identifier[precvar] [ literal[string] ] keyword[if]
( identifier[precvar] [ literal[string] ][ identifier[x] ][ literal[string] ] keyword[is] keyword[None] )
])
identifier[sortind] = identifier[np] . identifier[argsort] ( identifier[notvariable_recovered_periods] )
identifier[notvariable_recovered_periods] = identifier[notvariable_recovered_periods] [ identifier[sortind] ]
identifier[notvariable_recovered_lspvals] = identifier[notvariable_recovered_lspvals] [ identifier[sortind] ]
identifier[outdict] [ literal[string] ]= identifier[notvariable_recovered_periods]
identifier[outdict] [ literal[string] ]= identifier[notvariable_recovered_lspvals]
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[plt] . identifier[plot] ( identifier[notvariable_recovered_periods] ,
identifier[notvariable_recovered_lspvals] ,
identifier[ms] = literal[int] , identifier[linestyle] = literal[string] , identifier[marker] = literal[string] )
identifier[plt] . identifier[xscale] ( literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] * literal[int] , literal[int] * literal[int] ))
identifier[plt] . identifier[hist] ( identifier[notvariable_recovered_periods] , identifier[bins] = identifier[np] . identifier[arange] ( literal[int] , literal[int] , literal[int] ),
identifier[histtype] = literal[string] )
identifier[plt] . identifier[xscale] ( literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[title] ( literal[string] )
identifier[plt] . identifier[savefig] (
identifier[os] . identifier[path] . identifier[join] ( identifier[recplotdir] ,
literal[string] % identifier[plotfile_ext] ),
identifier[dpi] = literal[int] ,
identifier[bbox_inches] = literal[string]
)
identifier[plt] . identifier[close] ( literal[string] )
identifier[outfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[simbasedir] , literal[string] )
keyword[with] identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[outfd] :
identifier[pickle] . identifier[dump] ( identifier[outdict] , identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] )
keyword[return] identifier[outdict] | def plot_periodicvar_recovery_results(precvar_results, aliases_count_as_recovered=None, magbins=None, periodbins=None, amplitudebins=None, ndetbins=None, minbinsize=1, plotfile_ext='png'):
"""This plots the results of periodic var recovery.
This function makes plots for periodicvar recovered fraction as a function
of:
- magbin
- periodbin
- amplitude of variability
- ndet
with plot lines broken down by:
- magcol
- periodfinder
- vartype
- recovery status
The kwargs `magbins`, `periodbins`, `amplitudebins`, and `ndetbins` can be
used to set the bin lists as needed. The kwarg `minbinsize` controls how
many elements per bin are required to accept a bin in processing its
recovery characteristics for mags, periods, amplitudes, and ndets.
Parameters
----------
precvar_results : dict or str
This is either a dict returned by parallel_periodicvar_recovery or the
pickle created by that function.
aliases_count_as_recovered : list of str or 'all'
This is used to set which kinds of aliases this function considers as
'recovered' objects. Normally, we require that recovered objects have a
recovery status of 'actual' to indicate the actual period was
recovered. To change this default behavior, aliases_count_as_recovered
can be set to a list of alias status strings that should be considered
as 'recovered' objects as well. Choose from the following alias types::
'twice' recovered_p = 2.0*actual_p
'half' recovered_p = 0.5*actual_p
'ratio_over_1plus' recovered_p = actual_p/(1.0+actual_p)
'ratio_over_1minus' recovered_p = actual_p/(1.0-actual_p)
'ratio_over_1plus_twice' recovered_p = actual_p/(1.0+2.0*actual_p)
'ratio_over_1minus_twice' recovered_p = actual_p/(1.0-2.0*actual_p)
'ratio_over_1plus_thrice' recovered_p = actual_p/(1.0+3.0*actual_p)
'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p)
'ratio_over_minus1' recovered_p = actual_p/(actual_p - 1.0)
'ratio_over_twice_minus1' recovered_p = actual_p/(2.0*actual_p - 1.0)
or set `aliases_count_as_recovered='all'` to include all of the above in
the 'recovered' periodic var list.
magbins : np.array
The magnitude bins to plot the recovery rate results over. If None, the
default mag bins will be used: `np.arange(8.0,16.25,0.25)`.
periodbins : np.array
The period bins to plot the recovery rate results over. If None, the
default period bins will be used: `np.arange(0.0,500.0,0.5)`.
amplitudebins : np.array
The variability amplitude bins to plot the recovery rate results
over. If None, the default amplitude bins will be used:
`np.arange(0.0,2.0,0.05)`.
ndetbins : np.array
The ndet bins to plot the recovery rate results over. If None, the
default ndet bins will be used: `np.arange(0.0,60000.0,1000.0)`.
minbinsize : int
The minimum number of objects per bin required to plot a bin and its
recovery fraction on the plot.
plotfile_ext : {'png','pdf'}
Sets the plot output files' extension.
Returns
-------
dict
A dict containing recovery fraction statistics and the paths to each of
the plots made.
"""
# get the result pickle/dict
if isinstance(precvar_results, str) and os.path.exists(precvar_results):
with open(precvar_results, 'rb') as infd:
precvar = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['if'], data=[]]
elif isinstance(precvar_results, dict):
precvar = precvar_results # depends on [control=['if'], data=[]]
else:
LOGERROR('could not understand the input periodic var recovery dict/pickle')
return None
# get the simbasedir and open the fakelc-info.pkl. we'll need the magbins
# definition from here.
simbasedir = precvar['simbasedir']
lcinfof = os.path.join(simbasedir, 'fakelcs-info.pkl')
if not os.path.exists(lcinfof):
LOGERROR("fakelcs-info.pkl does not exist in %s, can't continue" % simbasedir)
return None # depends on [control=['if'], data=[]]
with open(lcinfof, 'rb') as infd:
lcinfo = pickle.load(infd) # depends on [control=['with'], data=['infd']]
# get the magcols, vartypes, sdssr, isvariable flags
magcols = lcinfo['magcols']
objectid = lcinfo['objectid']
ndet = lcinfo['ndet']
sdssr = lcinfo['sdssr']
# get the actual periodic vars
actual_periodicvars = precvar['actual_periodicvars']
# generate lists of objects binned by magbins and periodbins
LOGINFO('getting sdssr and ndet for actual periodic vars...')
# get the sdssr and ndet for all periodic vars
periodicvar_sdssr = []
periodicvar_ndet = []
periodicvar_objectids = []
for pobj in actual_periodicvars:
pobjind = objectid == pobj
periodicvar_objectids.append(pobj)
periodicvar_sdssr.append(sdssr[pobjind])
periodicvar_ndet.append(ndet[pobjind]) # depends on [control=['for'], data=['pobj']]
periodicvar_sdssr = np.array(periodicvar_sdssr)
periodicvar_objectids = np.array(periodicvar_objectids)
periodicvar_ndet = np.array(periodicvar_ndet)
LOGINFO('getting periods, vartypes, amplitudes, ndet for actual periodic vars...')
# get the periods, vartypes, amplitudes for the actual periodic vars
periodicvar_periods = [np.asscalar(precvar['details'][x]['actual_varperiod']) for x in periodicvar_objectids]
periodicvar_amplitudes = [np.asscalar(precvar['details'][x]['actual_varamplitude']) for x in periodicvar_objectids]
periodicvar_vartypes = [precvar['details'][x]['actual_vartype'] for x in periodicvar_objectids]
#
# do the binning
#
# bin by mag
LOGINFO('binning actual periodic vars by magnitude...')
magbinned_sdssr = []
magbinned_periodicvars = []
if not magbins:
magbins = PERIODREC_DEFAULT_MAGBINS # depends on [control=['if'], data=[]]
magbininds = np.digitize(np.ravel(periodicvar_sdssr), magbins)
for (mbinind, magi) in zip(np.unique(magbininds), range(len(magbins) - 1)):
thisbin_periodicvars = periodicvar_objectids[magbininds == mbinind]
if thisbin_periodicvars.size > minbinsize - 1:
magbinned_sdssr.append((magbins[magi] + magbins[magi + 1]) / 2.0)
magbinned_periodicvars.append(thisbin_periodicvars) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# bin by period
LOGINFO('binning actual periodic vars by period...')
periodbinned_periods = []
periodbinned_periodicvars = []
if not periodbins:
periodbins = PERIODREC_DEFAULT_PERIODBINS # depends on [control=['if'], data=[]]
periodbininds = np.digitize(np.ravel(periodicvar_periods), periodbins)
for (pbinind, peri) in zip(np.unique(periodbininds), range(len(periodbins) - 1)):
thisbin_periodicvars = periodicvar_objectids[periodbininds == pbinind]
if thisbin_periodicvars.size > minbinsize - 1:
periodbinned_periods.append((periodbins[peri] + periodbins[peri + 1]) / 2.0)
periodbinned_periodicvars.append(thisbin_periodicvars) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# bin by amplitude of variability
LOGINFO('binning actual periodic vars by variability amplitude...')
amplitudebinned_amplitudes = []
amplitudebinned_periodicvars = []
if not amplitudebins:
amplitudebins = PERIODREC_DEFAULT_AMPBINS # depends on [control=['if'], data=[]]
amplitudebininds = np.digitize(np.ravel(np.abs(periodicvar_amplitudes)), amplitudebins)
for (abinind, ampi) in zip(np.unique(amplitudebininds), range(len(amplitudebins) - 1)):
thisbin_periodicvars = periodicvar_objectids[amplitudebininds == abinind]
if thisbin_periodicvars.size > minbinsize - 1:
amplitudebinned_amplitudes.append((amplitudebins[ampi] + amplitudebins[ampi + 1]) / 2.0)
amplitudebinned_periodicvars.append(thisbin_periodicvars) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# bin by ndet
LOGINFO('binning actual periodic vars by ndet...')
ndetbinned_ndets = []
ndetbinned_periodicvars = []
if not ndetbins:
ndetbins = PERIODREC_DEFAULT_NDETBINS # depends on [control=['if'], data=[]]
ndetbininds = np.digitize(np.ravel(periodicvar_ndet), ndetbins)
for (nbinind, ndeti) in zip(np.unique(ndetbininds), range(len(ndetbins) - 1)):
thisbin_periodicvars = periodicvar_objectids[ndetbininds == nbinind]
if thisbin_periodicvars.size > minbinsize - 1:
ndetbinned_ndets.append((ndetbins[ndeti] + ndetbins[ndeti + 1]) / 2.0)
ndetbinned_periodicvars.append(thisbin_periodicvars) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# now figure out what 'recovered' means using the provided
# aliases_count_as_recovered kwarg
recovered_status = ['actual']
if isinstance(aliases_count_as_recovered, list):
for atype in aliases_count_as_recovered:
if atype in ALIAS_TYPES:
recovered_status.append(atype) # depends on [control=['if'], data=['atype']]
else:
LOGWARNING('unknown alias type: %s, skipping' % atype) # depends on [control=['for'], data=['atype']] # depends on [control=['if'], data=[]]
elif aliases_count_as_recovered and aliases_count_as_recovered == 'all':
for atype in ALIAS_TYPES[1:]:
recovered_status.append(atype) # depends on [control=['for'], data=['atype']] # depends on [control=['if'], data=[]]
# find all the matching objects for these recovered statuses
recovered_periodicvars = np.array([precvar['details'][x]['objectid'] for x in precvar['details'] if precvar['details'][x] is not None and precvar['details'][x]['best_recovered_status'] in recovered_status], dtype=np.unicode_)
LOGINFO('recovered %s/%s periodic variables (frac: %.3f) with period recovery status: %s' % (recovered_periodicvars.size, actual_periodicvars.size, float(recovered_periodicvars.size / actual_periodicvars.size), ', '.join(recovered_status)))
# get the objects recovered per bin and overall recovery fractions per bin
magbinned_recovered_objects = [np.intersect1d(x, recovered_periodicvars) for x in magbinned_periodicvars]
magbinned_recfrac = np.array([float(x.size / y.size) for (x, y) in zip(magbinned_recovered_objects, magbinned_periodicvars)])
periodbinned_recovered_objects = [np.intersect1d(x, recovered_periodicvars) for x in periodbinned_periodicvars]
periodbinned_recfrac = np.array([float(x.size / y.size) for (x, y) in zip(periodbinned_recovered_objects, periodbinned_periodicvars)])
amplitudebinned_recovered_objects = [np.intersect1d(x, recovered_periodicvars) for x in amplitudebinned_periodicvars]
amplitudebinned_recfrac = np.array([float(x.size / y.size) for (x, y) in zip(amplitudebinned_recovered_objects, amplitudebinned_periodicvars)])
ndetbinned_recovered_objects = [np.intersect1d(x, recovered_periodicvars) for x in ndetbinned_periodicvars]
ndetbinned_recfrac = np.array([float(x.size / y.size) for (x, y) in zip(ndetbinned_recovered_objects, ndetbinned_periodicvars)])
# convert the bin medians to arrays
magbinned_sdssr = np.array(magbinned_sdssr)
periodbinned_periods = np.array(periodbinned_periods)
amplitudebinned_amplitudes = np.array(amplitudebinned_amplitudes)
ndetbinned_ndets = np.array(ndetbinned_ndets)
# this is the initial output dict
# mag binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
# period binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
# amplitude binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
# ndet binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
outdict = {'simbasedir': simbasedir, 'precvar_results': precvar, 'magcols': magcols, 'objectids': objectid, 'ndet': ndet, 'sdssr': sdssr, 'actual_periodicvars': actual_periodicvars, 'recovered_periodicvars': recovered_periodicvars, 'recovery_definition': recovered_status, 'magbins': magbins, 'magbinned_mags': magbinned_sdssr, 'magbinned_periodicvars': magbinned_periodicvars, 'magbinned_recoveredvars': magbinned_recovered_objects, 'magbinned_recfrac': magbinned_recfrac, 'periodbins': periodbins, 'periodbinned_periods': periodbinned_periods, 'periodbinned_periodicvars': periodbinned_periodicvars, 'periodbinned_recoveredvars': periodbinned_recovered_objects, 'periodbinned_recfrac': periodbinned_recfrac, 'amplitudebins': amplitudebins, 'amplitudebinned_amplitudes': amplitudebinned_amplitudes, 'amplitudebinned_periodicvars': amplitudebinned_periodicvars, 'amplitudebinned_recoveredvars': amplitudebinned_recovered_objects, 'amplitudebinned_recfrac': amplitudebinned_recfrac, 'ndetbins': ndetbins, 'ndetbinned_ndets': ndetbinned_ndets, 'ndetbinned_periodicvars': ndetbinned_periodicvars, 'ndetbinned_recoveredvars': ndetbinned_recovered_objects, 'ndetbinned_recfrac': ndetbinned_recfrac}
# figure out which pfmethods were used
all_pfmethods = np.unique(np.concatenate([np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']]))
# figure out all vartypes
all_vartypes = np.unique([precvar['details'][x]['actual_vartype'] for x in precvar['details'] if precvar['details'][x]['actual_vartype'] is not None])
# figure out all alias types
all_aliastypes = recovered_status
# add these to the outdict
outdict['aliastypes'] = all_aliastypes
outdict['pfmethods'] = all_pfmethods
outdict['vartypes'] = all_vartypes
# these are recfracs per-magcol, -vartype, -periodfinder, -aliastype
# binned appropriately by mags, periods, amplitudes, and ndet
# all of these have the shape as the magcols, aliastypes, pfmethods, and
# vartypes lists above.
magbinned_per_magcol_recfracs = []
magbinned_per_vartype_recfracs = []
magbinned_per_pfmethod_recfracs = []
magbinned_per_aliastype_recfracs = []
periodbinned_per_magcol_recfracs = []
periodbinned_per_vartype_recfracs = []
periodbinned_per_pfmethod_recfracs = []
periodbinned_per_aliastype_recfracs = []
amplitudebinned_per_magcol_recfracs = []
amplitudebinned_per_vartype_recfracs = []
amplitudebinned_per_pfmethod_recfracs = []
amplitudebinned_per_aliastype_recfracs = []
ndetbinned_per_magcol_recfracs = []
ndetbinned_per_vartype_recfracs = []
ndetbinned_per_pfmethod_recfracs = []
ndetbinned_per_aliastype_recfracs = []
#
# finally, we do stuff for the plots!
#
recplotdir = os.path.join(simbasedir, 'periodic-variable-recovery-plots')
if not os.path.exists(recplotdir):
os.mkdir(recplotdir) # depends on [control=['if'], data=[]]
# 1. recovery-rate by magbin
# 1a. plot of overall recovery rate per magbin
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0)
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var magnitudes')
plt.ylim((0, 1))
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-magnitudes-overall.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 1b. plot of recovery rate per magbin per magcol
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
for magcol in magcols:
thismagcol_recfracs = []
for (magbin_pv, magbin_rv) in zip(magbinned_periodicvars, magbinned_recovered_objects):
thisbin_thismagcol_recvars = [x for x in magbin_rv if precvar['details'][x]['best_recovered_magcol'] == magcol]
thisbin_thismagcol_recfrac = np.array(thisbin_thismagcol_recvars).size / magbin_pv.size
thismagcol_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr, np.array(thismagcol_recfracs), marker='.', label='magcol: %s' % magcol, ms=0.0)
# add this to the outdict array
magbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs)) # depends on [control=['for'], data=['magcol']]
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var magnitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-magnitudes-magcols.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 1c. plot of recovery rate per magbin per periodfinder
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(np.concatenate([np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']]))
for pfm in all_pfmethods:
thispf_recfracs = []
for (magbin_pv, magbin_rv) in zip(magbinned_periodicvars, magbinned_recovered_objects):
thisbin_thispf_recvars = [x for x in magbin_rv if precvar['details'][x]['best_recovered_pfmethod'] == pfm]
thisbin_thismagcol_recfrac = np.array(thisbin_thispf_recvars).size / magbin_pv.size
thispf_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr, np.array(thispf_recfracs), marker='.', label='%s' % pfm.upper(), ms=0.0)
# add this to the outdict array
magbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs)) # depends on [control=['for'], data=['pfm']]
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var magnitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-magnitudes-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 1d. plot of recovery rate per magbin per variable type
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out all vartypes
all_vartypes = np.unique([precvar['details'][x]['actual_vartype'] for x in precvar['details'] if precvar['details'][x]['actual_vartype'] is not None])
for vt in all_vartypes:
thisvt_recfracs = []
for (magbin_pv, magbin_rv) in zip(magbinned_periodicvars, magbinned_recovered_objects):
thisbin_thisvt_recvars = [x for x in magbin_rv if precvar['details'][x]['actual_vartype'] == vt]
thisbin_thismagcol_recfrac = np.array(thisbin_thisvt_recvars).size / magbin_pv.size
thisvt_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr, np.array(thisvt_recfracs), marker='.', label='%s' % vt, ms=0.0)
# add this to the outdict array
magbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs)) # depends on [control=['for'], data=['vt']]
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var magnitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-magnitudes-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 1e. plot of recovery rate per magbin per alias type
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for (magbin_pv, magbin_rv) in zip(magbinned_periodicvars, magbinned_recovered_objects):
thisbin_thisat_recvars = [x for x in magbin_rv if precvar['details'][x]['best_recovered_status'][0] == at]
thisbin_thismagcol_recfrac = np.array(thisbin_thisat_recvars).size / magbin_pv.size
thisat_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr, np.array(thisat_recfracs), marker='.', label='%s' % at, ms=0.0)
# add this to the outdict array
magbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs)) # depends on [control=['for'], data=['at']]
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var magnitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-magnitudes-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 2. recovery-rate by periodbin
# 2a. plot of overall recovery rate per periodbin
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0)
plt.xlabel('periodic variable period [days]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var periods')
plt.ylim((0, 1))
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-periods-overall.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 2b. plot of recovery rate per periodbin per magcol
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
for magcol in magcols:
thismagcol_recfracs = []
for (periodbin_pv, periodbin_rv) in zip(periodbinned_periodicvars, periodbinned_recovered_objects):
thisbin_thismagcol_recvars = [x for x in periodbin_rv if precvar['details'][x]['best_recovered_magcol'] == magcol]
thisbin_thismagcol_recfrac = np.array(thisbin_thismagcol_recvars).size / periodbin_pv.size
thismagcol_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods, np.array(thismagcol_recfracs), marker='.', label='magcol: %s' % magcol, ms=0.0)
# add this to the outdict array
periodbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs)) # depends on [control=['for'], data=['magcol']]
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var periods')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-periods-magcols.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 2c. plot of recovery rate per periodbin per periodfinder
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(np.concatenate([np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']]))
for pfm in all_pfmethods:
thispf_recfracs = []
for (periodbin_pv, periodbin_rv) in zip(periodbinned_periodicvars, periodbinned_recovered_objects):
thisbin_thispf_recvars = [x for x in periodbin_rv if precvar['details'][x]['best_recovered_pfmethod'] == pfm]
thisbin_thismagcol_recfrac = np.array(thisbin_thispf_recvars).size / periodbin_pv.size
thispf_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods, np.array(thispf_recfracs), marker='.', label='%s' % pfm.upper(), ms=0.0)
# add this to the outdict array
periodbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs)) # depends on [control=['for'], data=['pfm']]
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var periods')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-periods-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 2d. plot of recovery rate per periodbin per variable type
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out all vartypes
all_vartypes = np.unique([precvar['details'][x]['actual_vartype'] for x in precvar['details'] if precvar['details'][x]['actual_vartype'] is not None])
for vt in all_vartypes:
thisvt_recfracs = []
for (periodbin_pv, periodbin_rv) in zip(periodbinned_periodicvars, periodbinned_recovered_objects):
thisbin_thisvt_recvars = [x for x in periodbin_rv if precvar['details'][x]['actual_vartype'] == vt]
thisbin_thismagcol_recfrac = np.array(thisbin_thisvt_recvars).size / periodbin_pv.size
thisvt_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods, np.array(thisvt_recfracs), marker='.', label='%s' % vt, ms=0.0)
# add this to the outdict array
periodbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs)) # depends on [control=['for'], data=['vt']]
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var magnitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-periods-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 2e. plot of recovery rate per periodbin per alias type
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out all vartypes
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for (periodbin_pv, periodbin_rv) in zip(periodbinned_periodicvars, periodbinned_recovered_objects):
thisbin_thisat_recvars = [x for x in periodbin_rv if precvar['details'][x]['best_recovered_status'][0] == at]
thisbin_thismagcol_recfrac = np.array(thisbin_thisat_recvars).size / periodbin_pv.size
thisat_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods, np.array(thisat_recfracs), marker='.', label='%s' % at, ms=0.0)
# add this to the outdict array
periodbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs)) # depends on [control=['for'], data=['at']]
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var magnitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-periods-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 3. recovery-rate by amplitude bin
# 3a. plot of overall recovery rate per amplitude bin
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.', ms=0.0)
plt.xlabel('periodic variable amplitude [mag]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var amplitudes')
plt.ylim((0, 1))
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-amplitudes-overall.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 3b. plot of recovery rate per amplitude bin per magcol
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
for magcol in magcols:
thismagcol_recfracs = []
for (amplitudebin_pv, amplitudebin_rv) in zip(amplitudebinned_periodicvars, amplitudebinned_recovered_objects):
thisbin_thismagcol_recvars = [x for x in amplitudebin_rv if precvar['details'][x]['best_recovered_magcol'] == magcol]
thisbin_thismagcol_recfrac = np.array(thisbin_thismagcol_recvars).size / amplitudebin_pv.size
thismagcol_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes, np.array(thismagcol_recfracs), marker='.', label='magcol: %s' % magcol, ms=0.0)
# add this to the outdict array
amplitudebinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs)) # depends on [control=['for'], data=['magcol']]
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var amplitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-amplitudes-magcols.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 3c. plot of recovery rate per amplitude bin per periodfinder
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(np.concatenate([np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']]))
for pfm in all_pfmethods:
thispf_recfracs = []
for (amplitudebin_pv, amplitudebin_rv) in zip(amplitudebinned_periodicvars, amplitudebinned_recovered_objects):
thisbin_thispf_recvars = [x for x in amplitudebin_rv if precvar['details'][x]['best_recovered_pfmethod'] == pfm]
thisbin_thismagcol_recfrac = np.array(thisbin_thispf_recvars).size / amplitudebin_pv.size
thispf_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes, np.array(thispf_recfracs), marker='.', label='%s' % pfm.upper(), ms=0.0)
# add this to the outdict array
amplitudebinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs)) # depends on [control=['for'], data=['pfm']]
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var amplitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-amplitudes-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 3d. plot of recovery rate per amplitude bin per variable type
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out all vartypes
all_vartypes = np.unique([precvar['details'][x]['actual_vartype'] for x in precvar['details'] if precvar['details'][x]['actual_vartype'] is not None])
for vt in all_vartypes:
thisvt_recfracs = []
for (amplitudebin_pv, amplitudebin_rv) in zip(amplitudebinned_periodicvars, amplitudebinned_recovered_objects):
thisbin_thisvt_recvars = [x for x in amplitudebin_rv if precvar['details'][x]['actual_vartype'] == vt]
thisbin_thismagcol_recfrac = np.array(thisbin_thisvt_recvars).size / amplitudebin_pv.size
thisvt_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes, np.array(thisvt_recfracs), marker='.', label='%s' % vt, ms=0.0)
# add this to the outdict array
amplitudebinned_per_vartype_recfracs.append(np.array(thisvt_recfracs)) # depends on [control=['for'], data=['vt']]
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var amplitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-amplitudes-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 3e. plot of recovery rate per amplitude bin per alias type
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out all vartypes
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for (amplitudebin_pv, amplitudebin_rv) in zip(amplitudebinned_periodicvars, amplitudebinned_recovered_objects):
thisbin_thisat_recvars = [x for x in amplitudebin_rv if precvar['details'][x]['best_recovered_status'][0] == at]
thisbin_thismagcol_recfrac = np.array(thisbin_thisat_recvars).size / amplitudebin_pv.size
thisat_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes, np.array(thisat_recfracs), marker='.', label='%s' % at, ms=0.0)
# add this to the outdict array
amplitudebinned_per_aliastype_recfracs.append(np.array(thisat_recfracs)) # depends on [control=['for'], data=['at']]
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var amplitudes')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-amplitudes-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 4. recovery-rate by ndet bin
# 4a. plot of overall recovery rate per ndet bin
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0)
plt.xlabel('periodic variable light curve points')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var ndet')
plt.ylim((0, 1))
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-ndet-overall.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 4b. plot of recovery rate per ndet bin per magcol
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
for magcol in magcols:
thismagcol_recfracs = []
for (ndetbin_pv, ndetbin_rv) in zip(ndetbinned_periodicvars, ndetbinned_recovered_objects):
thisbin_thismagcol_recvars = [x for x in ndetbin_rv if precvar['details'][x]['best_recovered_magcol'] == magcol]
thisbin_thismagcol_recfrac = np.array(thisbin_thismagcol_recvars).size / ndetbin_pv.size
thismagcol_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets, np.array(thismagcol_recfracs), marker='.', label='magcol: %s' % magcol, ms=0.0)
# add this to the outdict array
ndetbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs)) # depends on [control=['for'], data=['magcol']]
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var ndets')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-ndet-magcols.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 4c. plot of recovery rate per ndet bin per periodfinder
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(np.concatenate([np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']]))
for pfm in all_pfmethods:
thispf_recfracs = []
for (ndetbin_pv, ndetbin_rv) in zip(ndetbinned_periodicvars, ndetbinned_recovered_objects):
thisbin_thispf_recvars = [x for x in ndetbin_rv if precvar['details'][x]['best_recovered_pfmethod'] == pfm]
thisbin_thismagcol_recfrac = np.array(thisbin_thispf_recvars).size / ndetbin_pv.size
thispf_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets, np.array(thispf_recfracs), marker='.', label='%s' % pfm.upper(), ms=0.0)
# add this to the outdict array
ndetbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs)) # depends on [control=['for'], data=['pfm']]
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var ndets')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-ndet-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 4d. plot of recovery rate per ndet bin per variable type
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out all vartypes
all_vartypes = np.unique([precvar['details'][x]['actual_vartype'] for x in precvar['details'] if precvar['details'][x]['actual_vartype'] in PERIODIC_VARTYPES])
for vt in all_vartypes:
thisvt_recfracs = []
for (ndetbin_pv, ndetbin_rv) in zip(ndetbinned_periodicvars, ndetbinned_recovered_objects):
thisbin_thisvt_recvars = [x for x in ndetbin_rv if precvar['details'][x]['actual_vartype'] == vt]
thisbin_thismagcol_recfrac = np.array(thisbin_thisvt_recvars).size / ndetbin_pv.size
thisvt_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets, np.array(thisvt_recfracs), marker='.', label='%s' % vt, ms=0.0)
# add this to the outdict array
ndetbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs)) # depends on [control=['for'], data=['vt']]
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var ndets')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-ndet-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 4e. plot of recovery rate per ndet bin per alias type
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
# figure out all vartypes
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for (ndetbin_pv, ndetbin_rv) in zip(ndetbinned_periodicvars, ndetbinned_recovered_objects):
thisbin_thisat_recvars = [x for x in ndetbin_rv if precvar['details'][x]['best_recovered_status'][0] == at]
thisbin_thismagcol_recfrac = np.array(thisbin_thisat_recvars).size / ndetbin_pv.size
thisat_recfracs.append(thisbin_thismagcol_recfrac) # depends on [control=['for'], data=[]]
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets, np.array(thisat_recfracs), marker='.', label='%s' % at, ms=0.0)
# add this to the outdict array
ndetbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs)) # depends on [control=['for'], data=['at']]
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0, label='overall', color='k')
plt.xlabel('SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var ndets')
plt.ylim((0, 1))
plt.legend(markerscale=10.0)
plt.savefig(os.path.join(recplotdir, 'recfrac-binned-ndet-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# update the lists in the outdict
outdict['magbinned_per_magcol_recfracs'] = magbinned_per_magcol_recfracs
outdict['magbinned_per_pfmethod_recfracs'] = magbinned_per_pfmethod_recfracs
outdict['magbinned_per_vartype_recfracs'] = magbinned_per_vartype_recfracs
outdict['magbinned_per_aliastype_recfracs'] = magbinned_per_aliastype_recfracs
outdict['periodbinned_per_magcol_recfracs'] = periodbinned_per_magcol_recfracs
outdict['periodbinned_per_pfmethod_recfracs'] = periodbinned_per_pfmethod_recfracs
outdict['periodbinned_per_vartype_recfracs'] = periodbinned_per_vartype_recfracs
outdict['periodbinned_per_aliastype_recfracs'] = periodbinned_per_aliastype_recfracs
outdict['amplitudebinned_per_magcol_recfracs'] = amplitudebinned_per_magcol_recfracs
outdict['amplitudebinned_per_pfmethod_recfracs'] = amplitudebinned_per_pfmethod_recfracs
outdict['amplitudebinned_per_vartype_recfracs'] = amplitudebinned_per_vartype_recfracs
outdict['amplitudebinned_per_aliastype_recfracs'] = amplitudebinned_per_aliastype_recfracs
outdict['ndetbinned_per_magcol_recfracs'] = ndetbinned_per_magcol_recfracs
outdict['ndetbinned_per_pfmethod_recfracs'] = ndetbinned_per_pfmethod_recfracs
outdict['ndetbinned_per_vartype_recfracs'] = ndetbinned_per_vartype_recfracs
outdict['ndetbinned_per_aliastype_recfracs'] = ndetbinned_per_aliastype_recfracs
# get the overall recovered vars per pfmethod
overall_recvars_per_pfmethod = []
for pfm in all_pfmethods:
thispfm_recvars = np.array([x for x in precvar['details'] if x in recovered_periodicvars and precvar['details'][x]['best_recovered_pfmethod'] == pfm])
overall_recvars_per_pfmethod.append(thispfm_recvars) # depends on [control=['for'], data=['pfm']]
# get the overall recovered vars per vartype
overall_recvars_per_vartype = []
for vt in all_vartypes:
thisvt_recvars = np.array([x for x in precvar['details'] if x in recovered_periodicvars and precvar['details'][x]['actual_vartype'] == vt])
overall_recvars_per_vartype.append(thisvt_recvars) # depends on [control=['for'], data=['vt']]
# get the overall recovered vars per magcol
overall_recvars_per_magcol = []
for mc in magcols:
thismc_recvars = np.array([x for x in precvar['details'] if x in recovered_periodicvars and precvar['details'][x]['best_recovered_magcol'] == mc])
overall_recvars_per_magcol.append(thismc_recvars) # depends on [control=['for'], data=['mc']]
# get the overall recovered vars per aliastype
overall_recvars_per_aliastype = []
for at in all_aliastypes:
thisat_recvars = np.array([x for x in precvar['details'] if x in recovered_periodicvars and precvar['details'][x]['best_recovered_status'] == at])
overall_recvars_per_aliastype.append(thisat_recvars) # depends on [control=['for'], data=['at']]
# update the outdict with these
outdict['overall_recfrac_per_pfmethod'] = np.array([x.size / actual_periodicvars.size for x in overall_recvars_per_pfmethod])
outdict['overall_recfrac_per_vartype'] = np.array([x.size / actual_periodicvars.size for x in overall_recvars_per_vartype])
outdict['overall_recfrac_per_magcol'] = np.array([x.size / actual_periodicvars.size for x in overall_recvars_per_magcol])
outdict['overall_recfrac_per_aliastype'] = np.array([x.size / actual_periodicvars.size for x in overall_recvars_per_aliastype])
# 5. bar plot of overall recovery rate per pfmethod
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
xt = np.arange(len(all_pfmethods))
xl = all_pfmethods
plt.barh(xt, outdict['overall_recfrac_per_pfmethod'], 0.5)
plt.yticks(xt, xl)
plt.xlabel('period-finding method')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per period-finding method')
plt.savefig(os.path.join(recplotdir, 'recfrac-overall-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 6. bar plot of overall recovery rate per magcol
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
xt = np.arange(len(magcols))
xl = magcols
plt.barh(xt, outdict['overall_recfrac_per_magcol'], 0.5)
plt.yticks(xt, xl)
plt.xlabel('light curve magnitude column')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per light curve magcol')
plt.savefig(os.path.join(recplotdir, 'recfrac-overall-magcol.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 7. bar plot of overall recovery rate per aliastype
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
xt = np.arange(len(all_aliastypes))
xl = all_aliastypes
plt.barh(xt, outdict['overall_recfrac_per_aliastype'], 0.5)
plt.yticks(xt, xl)
plt.xlabel('period recovery status')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per period recovery status')
plt.savefig(os.path.join(recplotdir, 'recfrac-overall-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 8. bar plot of overall recovery rate per vartype
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
xt = np.arange(len(all_vartypes))
xl = all_vartypes
plt.barh(xt, outdict['overall_recfrac_per_vartype'], 0.5)
plt.yticks(xt, xl)
plt.xlabel('periodic variable type')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per periodic variable type')
plt.savefig(os.path.join(recplotdir, 'recfrac-overall-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 9. overall recovered period periodogram for objects that aren't actual
# periodic variables. this effectively should give us the window function of
# the observations
notvariable_recovered_periods = np.concatenate([precvar['details'][x]['recovery_periods'] for x in precvar['details'] if precvar['details'][x]['actual_vartype'] is None])
notvariable_recovered_lspvals = np.concatenate([precvar['details'][x]['recovery_lspvals'] for x in precvar['details'] if precvar['details'][x]['actual_vartype'] is None])
sortind = np.argsort(notvariable_recovered_periods)
notvariable_recovered_periods = notvariable_recovered_periods[sortind]
notvariable_recovered_lspvals = notvariable_recovered_lspvals[sortind]
outdict['notvariable_recovered_periods'] = notvariable_recovered_periods
outdict['notvariable_recovered_lspvals'] = notvariable_recovered_lspvals
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
plt.plot(notvariable_recovered_periods, notvariable_recovered_lspvals, ms=1.0, linestyle='none', marker='.')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('recovered normalized periodogram power')
plt.title('periodogram for actual not-variable objects')
plt.savefig(os.path.join(recplotdir, 'recovered-periodogram-nonvariables.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# 10. overall recovered period histogram for objects marked
# not-variable. this gives us the most common periods
fig = plt.figure(figsize=(6.4 * 1.5, 4.8 * 1.5))
plt.hist(notvariable_recovered_periods, bins=np.arange(0.02, 300.0, 0.001), histtype='step')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('number of times periods recovered')
plt.title('recovered period histogram for non-variable objects')
plt.savefig(os.path.join(recplotdir, 'recovered-period-hist-nonvariables.%s' % plotfile_ext), dpi=100, bbox_inches='tight')
plt.close('all')
# at the end, write the outdict to a pickle and return it
outfile = os.path.join(simbasedir, 'periodicvar-recovery-plotresults.pkl')
with open(outfile, 'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']]
return outdict |
def start(endpoint='data.logentries.com',
          port=10000,
          token=None,
          tag='salt/engines/logentries'):
    '''
    Listen to salt events and forward them to Logentries.

    endpoint -- Logentries ingestion host to connect to.
    port     -- TCP port on the ingestion host.
    token    -- Logentries log token (a UUID string); prepended to every
                forwarded message.
    tag      -- tag string inserted between the token and the JSON payload.

    Runs forever: each event from the salt event bus is serialized to JSON
    and sent as "<token> <tag> <json>" through the socket appender.
    '''
    # Masters are identified by an id ending in '_master'; pick the
    # matching event bus flavor accordingly.
    if __opts__.get('id').endswith('_master'):
        event_bus = salt.utils.event.get_master_event(
            __opts__,
            __opts__['sock_dir'],
            listen=True)
    else:
        event_bus = salt.utils.event.get_event(
            'minion',
            transport=__opts__['transport'],
            opts=__opts__,
            sock_dir=__opts__['sock_dir'],
            listen=True)
    log.debug('Logentries engine started')
    # Validate the token shape up front; UUID() raises ValueError on a
    # malformed token.  Only warn, because the engine can still run (the
    # service will simply drop messages with a bad token).
    try:
        uuid.UUID(token)
    except ValueError:
        log.warning('Not a valid logentries token')
    appender = SocketAppender(verbose=False, LE_API=endpoint, LE_PORT=port)
    appender.reopen_connection()
    while True:
        event = event_bus.get_event()
        if event:
            # future lint: disable=blacklisted-function
            msg = str(' ').join((
                salt.utils.stringutils.to_str(token),
                salt.utils.stringutils.to_str(tag),
                salt.utils.json.dumps(event)
            ))
            # future lint: enable=blacklisted-function
            appender.put(msg)
    # NOTE(review): unreachable -- the loop above never exits.  Kept to
    # match the upstream engine shape; the socket is closed by process
    # teardown instead.
    appender.close_connection()
constant[
Listen to salt events and forward them to Logentries
]
if call[call[name[__opts__].get, parameter[constant[id]]].endswith, parameter[constant[_master]]] begin[:]
variable[event_bus] assign[=] call[name[salt].utils.event.get_master_event, parameter[name[__opts__], call[name[__opts__]][constant[sock_dir]]]]
call[name[log].debug, parameter[constant[Logentries engine started]]]
<ast.Try object at 0x7da18f09cd90>
variable[appender] assign[=] call[name[SocketAppender], parameter[]]
call[name[appender].reopen_connection, parameter[]]
while constant[True] begin[:]
variable[event] assign[=] call[name[event_bus].get_event, parameter[]]
if name[event] begin[:]
variable[msg] assign[=] call[call[name[str], parameter[constant[ ]]].join, parameter[tuple[[<ast.Call object at 0x7da20c7cafb0>, <ast.Call object at 0x7da20c7cb910>, <ast.Call object at 0x7da204620370>]]]]
call[name[appender].put, parameter[name[msg]]]
call[name[appender].close_connection, parameter[]] | keyword[def] identifier[start] ( identifier[endpoint] = literal[string] ,
identifier[port] = literal[int] ,
identifier[token] = keyword[None] ,
identifier[tag] = literal[string] ):
literal[string]
keyword[if] identifier[__opts__] . identifier[get] ( literal[string] ). identifier[endswith] ( literal[string] ):
identifier[event_bus] = identifier[salt] . identifier[utils] . identifier[event] . identifier[get_master_event] (
identifier[__opts__] ,
identifier[__opts__] [ literal[string] ],
identifier[listen] = keyword[True] )
keyword[else] :
identifier[event_bus] = identifier[salt] . identifier[utils] . identifier[event] . identifier[get_event] (
literal[string] ,
identifier[transport] = identifier[__opts__] [ literal[string] ],
identifier[opts] = identifier[__opts__] ,
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[listen] = keyword[True] )
identifier[log] . identifier[debug] ( literal[string] )
keyword[try] :
identifier[val] = identifier[uuid] . identifier[UUID] ( identifier[token] )
keyword[except] identifier[ValueError] :
identifier[log] . identifier[warning] ( literal[string] )
identifier[appender] = identifier[SocketAppender] ( identifier[verbose] = keyword[False] , identifier[LE_API] = identifier[endpoint] , identifier[LE_PORT] = identifier[port] )
identifier[appender] . identifier[reopen_connection] ()
keyword[while] keyword[True] :
identifier[event] = identifier[event_bus] . identifier[get_event] ()
keyword[if] identifier[event] :
identifier[msg] = identifier[str] ( literal[string] ). identifier[join] ((
identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( identifier[token] ),
identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( identifier[tag] ),
identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] ( identifier[event] )
))
identifier[appender] . identifier[put] ( identifier[msg] )
identifier[appender] . identifier[close_connection] () | def start(endpoint='data.logentries.com', port=10000, token=None, tag='salt/engines/logentries'):
"""
Listen to salt events and forward them to Logentries
"""
if __opts__.get('id').endswith('_master'):
event_bus = salt.utils.event.get_master_event(__opts__, __opts__['sock_dir'], listen=True) # depends on [control=['if'], data=[]]
else:
event_bus = salt.utils.event.get_event('minion', transport=__opts__['transport'], opts=__opts__, sock_dir=__opts__['sock_dir'], listen=True)
log.debug('Logentries engine started')
try:
val = uuid.UUID(token) # depends on [control=['try'], data=[]]
except ValueError:
log.warning('Not a valid logentries token') # depends on [control=['except'], data=[]]
appender = SocketAppender(verbose=False, LE_API=endpoint, LE_PORT=port)
appender.reopen_connection()
while True:
event = event_bus.get_event()
if event:
# future lint: disable=blacklisted-function
msg = str(' ').join((salt.utils.stringutils.to_str(token), salt.utils.stringutils.to_str(tag), salt.utils.json.dumps(event)))
# future lint: enable=blacklisted-function
appender.put(msg) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
appender.close_connection() |
def run(self):
    """Instantiate the recording environment and the model, restore the
    most recent checkpoint from storage, and evaluate the model on it."""
    torch_device = self.model_config.torch_device()
    environment = self.vec_env_factory.instantiate_single(
        preset='record', seed=self.model_config.seed)
    net = self.model_factory.instantiate(
        action_space=environment.action_space).to(torch_device)
    # Resume bookkeeping from the last stored epoch.
    info = TrainingInfo(
        start_epoch_idx=self.storage.last_epoch_idx(),
        run_name=self.model_config.run_name)
    self.storage.load(info, net)
    net.eval()
    self.run_model(net, environment, torch_device)
self.run_model(model, env, device) | def function[run, parameter[self]]:
constant[ Run the command ]
variable[device] assign[=] call[name[self].model_config.torch_device, parameter[]]
variable[env] assign[=] call[name[self].vec_env_factory.instantiate_single, parameter[]]
variable[model] assign[=] call[call[name[self].model_factory.instantiate, parameter[]].to, parameter[name[device]]]
variable[training_info] assign[=] call[name[TrainingInfo], parameter[]]
call[name[self].storage.load, parameter[name[training_info], name[model]]]
call[name[model].eval, parameter[]]
call[name[self].run_model, parameter[name[model], name[env], name[device]]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[device] = identifier[self] . identifier[model_config] . identifier[torch_device] ()
identifier[env] = identifier[self] . identifier[vec_env_factory] . identifier[instantiate_single] ( identifier[preset] = literal[string] , identifier[seed] = identifier[self] . identifier[model_config] . identifier[seed] )
identifier[model] = identifier[self] . identifier[model_factory] . identifier[instantiate] ( identifier[action_space] = identifier[env] . identifier[action_space] ). identifier[to] ( identifier[device] )
identifier[training_info] = identifier[TrainingInfo] (
identifier[start_epoch_idx] = identifier[self] . identifier[storage] . identifier[last_epoch_idx] (),
identifier[run_name] = identifier[self] . identifier[model_config] . identifier[run_name]
)
identifier[self] . identifier[storage] . identifier[load] ( identifier[training_info] , identifier[model] )
identifier[model] . identifier[eval] ()
identifier[self] . identifier[run_model] ( identifier[model] , identifier[env] , identifier[device] ) | def run(self):
""" Run the command """
device = self.model_config.torch_device()
env = self.vec_env_factory.instantiate_single(preset='record', seed=self.model_config.seed)
model = self.model_factory.instantiate(action_space=env.action_space).to(device)
training_info = TrainingInfo(start_epoch_idx=self.storage.last_epoch_idx(), run_name=self.model_config.run_name)
self.storage.load(training_info, model)
model.eval()
self.run_model(model, env, device) |
def move(self, particle, u, v, w, modelTimestep, **kwargs):
    """Return the ``{'u', 'v', 'w'}`` velocities steering *particle*
    toward the preferred depth band between ``self.max_depth`` and
    ``self.min_depth``.

    Settled or halted particles get zero velocity.  Otherwise the
    horizontal components are passed through unchanged and only the
    vertical component is adjusted:

    * deeper than ``max_depth``    -> swim up (return ``w``)
    * shallower than ``min_depth`` -> swim down (return ``-w``)
    * already inside the band      -> hold depth (``w`` = 0)

    When one timestep of vertical motion would overshoot the band, ``w``
    is recomputed so the particle lands in the middle of the band
    instead.  ``min_depth``/``max_depth`` are assumed to have been
    validated before this call (min less than max in the depth
    convention used); no checks are done here.
    NOTE(review): the depth axis appears to be negative-down (max_depth
    is the more negative bound) -- confirm against the caller.
    """
    # If the particle is settled, don't move it anywhere.
    if particle.settled:
        return { 'u': 0, 'v': 0, 'w': 0 }
    # If the particle is halted (but not settled), don't move it anywhere.
    if particle.halted:
        return { 'u': 0, 'v': 0, 'w': 0 }
    # How far could I move this step?  Used to detect overshoot of the
    # desired depth band.
    vertical_potential = w * modelTimestep
    # Particle is beyond max_depth: keep swimming UP toward the band.
    #
    #    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #    -------------------------------------- min
    #    -------------------------------------- max
    #     x  me
    #    ______________________________________
    if particle.location.depth < self.max_depth:
        logger.debug("DIEL: %s - Moving UP to desired depth from %f" % (self.logstring(), particle.location.depth))
        # If we are going to overshoot the desired minimum depth,
        # calculate a new w to land in the middle of the range.
        overshoot_distance = abs(particle.location.depth - self.min_depth)
        if overshoot_distance < abs(vertical_potential):
            halfway_distance = abs((self.max_depth - self.min_depth) / 2)
            w = ((overshoot_distance - halfway_distance) / modelTimestep)
        return { 'u': u, 'v': v, 'w': w }
    # Particle is above min_depth: swim DOWN toward the band.
    #
    #    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #     x  me
    #    -------------------------------------- min
    #    -------------------------------------- max
    #    ______________________________________
    if particle.location.depth > self.min_depth:
        logger.debug("DIEL: %s - Moving DOWN to desired depth from %f" % (self.logstring(), particle.location.depth))
        # If we are going to overshoot the desired maximum depth,
        # calculate a new w to land in the middle of the range.
        overshoot_distance = abs(particle.location.depth - self.max_depth)
        if overshoot_distance < abs(vertical_potential):
            halfway_distance = abs((self.max_depth - self.min_depth) / 2)
            w = ((overshoot_distance - halfway_distance) / modelTimestep)
        return { 'u': u, 'v': v, 'w': -w }
    # Already inside the desired band: hold depth.
    #
    #    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #    -------------------------------------- min
    #     x  me
    #    -------------------------------------- max
    #    ______________________________________
    return { 'u': u, 'v': v, 'w': 0 }
if name[particle].settled begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b28f0fa0>, <ast.Constant object at 0x7da1b28f26e0>, <ast.Constant object at 0x7da1b28f2b60>], [<ast.Constant object at 0x7da1b28f2800>, <ast.Constant object at 0x7da1b28f2b30>, <ast.Constant object at 0x7da1b28f1000>]]]
if name[particle].halted begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b28f3ee0>, <ast.Constant object at 0x7da1b28f12a0>, <ast.Constant object at 0x7da1b28f3dc0>], [<ast.Constant object at 0x7da1b28f3f40>, <ast.Constant object at 0x7da1b28f25c0>, <ast.Constant object at 0x7da1b28f1360>]]]
variable[vertical_potential] assign[=] binary_operation[name[w] * name[modelTimestep]]
constant[
This only works if min is less than max.
No checks are done here, so it should be done before
calling this function.
]
constant[ I'm below my desired max depth, so i need to go down
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-------------------------------------- min
-------------------------------------- max
x me
______________________________________
]
if compare[name[particle].location.depth less[<] name[self].max_depth] begin[:]
call[name[logger].debug, parameter[binary_operation[constant[DIEL: %s - Moving UP to desired depth from %f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b28f3550>, <ast.Attribute object at 0x7da1b28f16c0>]]]]]
variable[overshoot_distance] assign[=] call[name[abs], parameter[binary_operation[name[particle].location.depth - name[self].min_depth]]]
if compare[name[overshoot_distance] less[<] call[name[abs], parameter[name[vertical_potential]]]] begin[:]
variable[halfway_distance] assign[=] call[name[abs], parameter[binary_operation[binary_operation[name[self].max_depth - name[self].min_depth] / constant[2]]]]
variable[w] assign[=] binary_operation[binary_operation[name[overshoot_distance] - name[halfway_distance]] / name[modelTimestep]]
return[dictionary[[<ast.Constant object at 0x7da1b28f2590>, <ast.Constant object at 0x7da1b28f3e20>, <ast.Constant object at 0x7da1b28f3f10>], [<ast.Name object at 0x7da1b28f0d90>, <ast.Name object at 0x7da1b28f2050>, <ast.Name object at 0x7da1b28f1390>]]]
constant[ I'm above my desired min depth, so i need to go down
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
x me
-------------------------------------- min
-------------------------------------- max
______________________________________
]
if compare[name[particle].location.depth greater[>] name[self].min_depth] begin[:]
call[name[logger].debug, parameter[binary_operation[constant[DIEL: %s - Moving DOWN to desired depth from %f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b28f2fb0>, <ast.Attribute object at 0x7da1b28f1b70>]]]]]
variable[overshoot_distance] assign[=] call[name[abs], parameter[binary_operation[name[particle].location.depth - name[self].max_depth]]]
if compare[name[overshoot_distance] less[<] call[name[abs], parameter[name[vertical_potential]]]] begin[:]
variable[halfway_distance] assign[=] call[name[abs], parameter[binary_operation[binary_operation[name[self].max_depth - name[self].min_depth] / constant[2]]]]
variable[w] assign[=] binary_operation[binary_operation[name[overshoot_distance] - name[halfway_distance]] / name[modelTimestep]]
return[dictionary[[<ast.Constant object at 0x7da18f720190>, <ast.Constant object at 0x7da18f722080>, <ast.Constant object at 0x7da18f7218d0>], [<ast.Name object at 0x7da18f721c60>, <ast.Name object at 0x7da18f723130>, <ast.UnaryOp object at 0x7da18f722b90>]]]
constant[ I'm in my desired depth, so I'm just gonna chill here
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-------------------------------------- min
x me
-------------------------------------- max
______________________________________
]
return[dictionary[[<ast.Constant object at 0x7da18f721420>, <ast.Constant object at 0x7da18f7216f0>, <ast.Constant object at 0x7da18f721300>], [<ast.Name object at 0x7da18f722980>, <ast.Name object at 0x7da18f721750>, <ast.Constant object at 0x7da18f722260>]]] | keyword[def] identifier[move] ( identifier[self] , identifier[particle] , identifier[u] , identifier[v] , identifier[w] , identifier[modelTimestep] ,** identifier[kwargs] ):
keyword[if] identifier[particle] . identifier[settled] :
keyword[return] { literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
keyword[if] identifier[particle] . identifier[halted] :
keyword[return] { literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
identifier[vertical_potential] = identifier[w] * identifier[modelTimestep]
literal[string]
literal[string]
keyword[if] identifier[particle] . identifier[location] . identifier[depth] < identifier[self] . identifier[max_depth] :
identifier[logger] . identifier[debug] ( literal[string] %( identifier[self] . identifier[logstring] (), identifier[particle] . identifier[location] . identifier[depth] ))
identifier[overshoot_distance] = identifier[abs] ( identifier[particle] . identifier[location] . identifier[depth] - identifier[self] . identifier[min_depth] )
keyword[if] identifier[overshoot_distance] < identifier[abs] ( identifier[vertical_potential] ):
identifier[halfway_distance] = identifier[abs] (( identifier[self] . identifier[max_depth] - identifier[self] . identifier[min_depth] )/ literal[int] )
identifier[w] =(( identifier[overshoot_distance] - identifier[halfway_distance] )/ identifier[modelTimestep] )
keyword[return] { literal[string] : identifier[u] , literal[string] : identifier[v] , literal[string] : identifier[w] }
literal[string]
keyword[if] identifier[particle] . identifier[location] . identifier[depth] > identifier[self] . identifier[min_depth] :
identifier[logger] . identifier[debug] ( literal[string] %( identifier[self] . identifier[logstring] (), identifier[particle] . identifier[location] . identifier[depth] ))
identifier[overshoot_distance] = identifier[abs] ( identifier[particle] . identifier[location] . identifier[depth] - identifier[self] . identifier[max_depth] )
keyword[if] identifier[overshoot_distance] < identifier[abs] ( identifier[vertical_potential] ):
identifier[halfway_distance] = identifier[abs] (( identifier[self] . identifier[max_depth] - identifier[self] . identifier[min_depth] )/ literal[int] )
identifier[w] =(( identifier[overshoot_distance] - identifier[halfway_distance] )/ identifier[modelTimestep] )
keyword[return] { literal[string] : identifier[u] , literal[string] : identifier[v] , literal[string] :- identifier[w] }
literal[string]
keyword[return] { literal[string] : identifier[u] , literal[string] : identifier[v] , literal[string] : literal[int] } | def move(self, particle, u, v, w, modelTimestep, **kwargs):
# If the particle is settled, don't move it anywhere
if particle.settled:
return {'u': 0, 'v': 0, 'w': 0} # depends on [control=['if'], data=[]]
# If the particle is halted (but not settled), don't move it anywhere
if particle.halted:
return {'u': 0, 'v': 0, 'w': 0} # depends on [control=['if'], data=[]]
# How far could I move? We don't want to overshoot our desired depth.
vertical_potential = w * modelTimestep
'\n This only works if min is less than max.\n No checks are done here, so it should be done before\n calling this function.\n '
" I'm below my desired max depth, so i need to go down\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n -------------------------------------- min\n -------------------------------------- max\n x me\n ______________________________________\n "
if particle.location.depth < self.max_depth:
logger.debug('DIEL: %s - Moving UP to desired depth from %f' % (self.logstring(), particle.location.depth))
# If we are going to overshoot the desired minimum depth,
# calculate a new w to land in the middle of the range.
overshoot_distance = abs(particle.location.depth - self.min_depth)
if overshoot_distance < abs(vertical_potential):
halfway_distance = abs((self.max_depth - self.min_depth) / 2)
w = (overshoot_distance - halfway_distance) / modelTimestep # depends on [control=['if'], data=['overshoot_distance']]
return {'u': u, 'v': v, 'w': w} # depends on [control=['if'], data=[]]
" I'm above my desired min depth, so i need to go down\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n x me\n -------------------------------------- min\n -------------------------------------- max\n ______________________________________\n "
if particle.location.depth > self.min_depth:
logger.debug('DIEL: %s - Moving DOWN to desired depth from %f' % (self.logstring(), particle.location.depth))
# If we are going to overshoot the desired maximum depth,
# calculate a new w to land in the middle of the range.
overshoot_distance = abs(particle.location.depth - self.max_depth)
if overshoot_distance < abs(vertical_potential):
halfway_distance = abs((self.max_depth - self.min_depth) / 2)
w = (overshoot_distance - halfway_distance) / modelTimestep # depends on [control=['if'], data=['overshoot_distance']]
return {'u': u, 'v': v, 'w': -w} # depends on [control=['if'], data=[]]
" I'm in my desired depth, so I'm just gonna chill here\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n -------------------------------------- min\n x me\n -------------------------------------- max\n ______________________________________\n "
return {'u': u, 'v': v, 'w': 0} |
def centroid_2dg(data, error=None, mask=None):
    """
    Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
    a constant) to the array.

    Invalid values (e.g. NaNs or infs) in ``data`` or ``error`` are
    masked automatically; the effective mask is the union of the
    invalid-value masks of both arrays.

    Parameters
    ----------
    data : array_like
        The 2D data array.
    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.
    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    centroid : `~numpy.ndarray`
        The ``x, y`` coordinates of the centroid.
    """
    gaussian_fit = fit_2dgaussian(data, error=error, mask=mask)
    x_centroid = gaussian_fit.x_mean.value
    y_centroid = gaussian_fit.y_mean.value
    return np.array([x_centroid, y_centroid])
constant[
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
]
variable[gfit] assign[=] call[name[fit_2dgaussian], parameter[name[data]]]
return[call[name[np].array, parameter[list[[<ast.Attribute object at 0x7da20e954a60>, <ast.Attribute object at 0x7da20e956bf0>]]]]] | keyword[def] identifier[centroid_2dg] ( identifier[data] , identifier[error] = keyword[None] , identifier[mask] = keyword[None] ):
literal[string]
identifier[gfit] = identifier[fit_2dgaussian] ( identifier[data] , identifier[error] = identifier[error] , identifier[mask] = identifier[mask] )
keyword[return] identifier[np] . identifier[array] ([ identifier[gfit] . identifier[x_mean] . identifier[value] , identifier[gfit] . identifier[y_mean] . identifier[value] ]) | def centroid_2dg(data, error=None, mask=None):
"""
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
"""
gfit = fit_2dgaussian(data, error=error, mask=mask)
return np.array([gfit.x_mean.value, gfit.y_mean.value]) |
def get_go2children_go2obj(go2obj):
    """Map each GO ID key of *go2obj* (including alternate IDs) to the set of its child GO IDs."""
    goterm_objs, alt2obj = get_goobjs_altgo2goobj(go2obj)
    id2children = get_id2children(goterm_objs)
    # Alternate GO IDs must resolve to the same child sets as their
    # primary IDs.
    add_alt_goids(id2children, alt2obj)
    return id2children
constant[Return go2children (set of child GO IDs) for all GO ID keys in go2obj.]
<ast.Tuple object at 0x7da20e954c70> assign[=] call[name[get_goobjs_altgo2goobj], parameter[name[go2obj]]]
variable[go2children] assign[=] call[name[get_id2children], parameter[name[goobjs]]]
call[name[add_alt_goids], parameter[name[go2children], name[altgo2goobj]]]
return[name[go2children]] | keyword[def] identifier[get_go2children_go2obj] ( identifier[go2obj] ):
literal[string]
identifier[goobjs] , identifier[altgo2goobj] = identifier[get_goobjs_altgo2goobj] ( identifier[go2obj] )
identifier[go2children] = identifier[get_id2children] ( identifier[goobjs] )
identifier[add_alt_goids] ( identifier[go2children] , identifier[altgo2goobj] )
keyword[return] identifier[go2children] | def get_go2children_go2obj(go2obj):
"""Return go2children (set of child GO IDs) for all GO ID keys in go2obj."""
(goobjs, altgo2goobj) = get_goobjs_altgo2goobj(go2obj)
go2children = get_id2children(goobjs)
add_alt_goids(go2children, altgo2goobj)
return go2children |
def global_position_int_cov_send(self, time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance, force_mavlink1=False):
    '''
    Send a GLOBAL_POSITION_INT_COV message: the filtered global position
    (e.g. fused GPS and accelerometers) in GPS frame (right-handed,
    Z-up), encoded as scaled integers because float resolution is not
    sufficient.  Intended for onboard networks / companion computers and
    higher-bandwidth links, optimized for accuracy and completeness; use
    the GLOBAL_POSITION_INT message for a minimal subset.

    time_boot_ms      : Timestamp (milliseconds since system boot) (uint32_t)
    time_utc          : Timestamp (microseconds since UNIX epoch) in UTC; 0 for unknown.
                        Commonly filled by the precision time source of a GPS receiver. (uint64_t)
    estimator_type    : Class id of the estimator this estimate originated from. (uint8_t)
    lat               : Latitude, expressed as degrees * 1E7 (int32_t)
    lon               : Longitude, expressed as degrees * 1E7 (int32_t)
    alt               : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
    relative_alt      : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
    vx                : Ground X Speed (Latitude), expressed as m/s (float)
    vy                : Ground Y Speed (Longitude), expressed as m/s (float)
    vz                : Ground Z Speed (Altitude), expressed as m/s (float)
    covariance        : Covariance matrix (first six entries are the first ROW,
                        next six entries are the second row, etc.) (float)
    '''
    # Encode first, then hand the packed message to the transport layer.
    packed = self.global_position_int_cov_encode(
        time_boot_ms, time_utc, estimator_type, lat, lon, alt,
        relative_alt, vx, vy, vz, covariance)
    return self.send(packed, force_mavlink1=force_mavlink1)
constant[
The filtered global position (e.g. fused GPS and accelerometers). The
position is in GPS-frame (right-handed, Z-up). It is
designed as scaled integer message since the
resolution of float is not sufficient. NOTE: This
message is intended for onboard networks / companion
computers and higher-bandwidth links and optimized for
accuracy and completeness. Please use the
GLOBAL_POSITION_INT message for a minimal subset.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t)
estimator_type : Class id of the estimator this estimate originated from. (uint8_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
vx : Ground X Speed (Latitude), expressed as m/s (float)
vy : Ground Y Speed (Longitude), expressed as m/s (float)
vz : Ground Z Speed (Altitude), expressed as m/s (float)
covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float)
]
return[call[name[self].send, parameter[call[name[self].global_position_int_cov_encode, parameter[name[time_boot_ms], name[time_utc], name[estimator_type], name[lat], name[lon], name[alt], name[relative_alt], name[vx], name[vy], name[vz], name[covariance]]]]]] | keyword[def] identifier[global_position_int_cov_send] ( identifier[self] , identifier[time_boot_ms] , identifier[time_utc] , identifier[estimator_type] , identifier[lat] , identifier[lon] , identifier[alt] , identifier[relative_alt] , identifier[vx] , identifier[vy] , identifier[vz] , identifier[covariance] , identifier[force_mavlink1] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[global_position_int_cov_encode] ( identifier[time_boot_ms] , identifier[time_utc] , identifier[estimator_type] , identifier[lat] , identifier[lon] , identifier[alt] , identifier[relative_alt] , identifier[vx] , identifier[vy] , identifier[vz] , identifier[covariance] ), identifier[force_mavlink1] = identifier[force_mavlink1] ) | def global_position_int_cov_send(self, time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance, force_mavlink1=False):
"""
The filtered global position (e.g. fused GPS and accelerometers). The
position is in GPS-frame (right-handed, Z-up). It is
designed as scaled integer message since the
resolution of float is not sufficient. NOTE: This
message is intended for onboard networks / companion
computers and higher-bandwidth links and optimized for
accuracy and completeness. Please use the
GLOBAL_POSITION_INT message for a minimal subset.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t)
estimator_type : Class id of the estimator this estimate originated from. (uint8_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
vx : Ground X Speed (Latitude), expressed as m/s (float)
vy : Ground Y Speed (Longitude), expressed as m/s (float)
vz : Ground Z Speed (Altitude), expressed as m/s (float)
covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float)
"""
return self.send(self.global_position_int_cov_encode(time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance), force_mavlink1=force_mavlink1) |
def returnAllChips(self, extname=None, exclude=None):
    """Return all IMAGE chips matching *extname*, minus any excluded names.

    Parameters
    ----------
    extname : str, optional
        Extension name to select; resolved through ``self._findExtnames``.
    exclude : optional
        Extension name(s) to drop from the selection (passed through to
        ``self._findExtnames``).

    Returns
    -------
    list
        Image extension objects whose ``extname`` is in the selected set
        and whose science extension with the same ``extver`` is flagged
        as a group member.
    """
    extensions = self._findExtnames(extname=extname, exclude=exclude)
    chiplist = []
    for i in range(1, self._nextend + 1):
        # Default to extver 1 when the header does not carry one.
        extver = self._image[i].header.get('extver', 1)
        # Only genuine IMAGE extensions are candidates.
        if (hasattr(self._image[i], '_extension') and
                "IMAGE" in self._image[i]._extension):
            if (self._image[i].extname in extensions and
                    self._image[self.scienceExt, extver].group_member):
                chiplist.append(self._image[i])
    return chiplist
constant[ Returns a list containing all the chips which match the
extname given minus those specified for exclusion (if any).
]
variable[extensions] assign[=] call[name[self]._findExtnames, parameter[]]
variable[chiplist] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[name[self]._nextend + constant[1]], constant[1]]]] begin[:]
if compare[constant[extver] in call[name[self]._image][name[i]].header] begin[:]
variable[extver] assign[=] call[call[name[self]._image][name[i]].header][constant[extver]]
if <ast.BoolOp object at 0x7da1b1bb5870> begin[:]
if <ast.BoolOp object at 0x7da1b1bb51b0> begin[:]
call[name[chiplist].append, parameter[call[name[self]._image][name[i]]]]
return[name[chiplist]] | keyword[def] identifier[returnAllChips] ( identifier[self] , identifier[extname] = keyword[None] , identifier[exclude] = keyword[None] ):
literal[string]
identifier[extensions] = identifier[self] . identifier[_findExtnames] ( identifier[extname] = identifier[extname] , identifier[exclude] = identifier[exclude] )
identifier[chiplist] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[_nextend] + literal[int] , literal[int] ):
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_image] [ identifier[i] ]. identifier[header] :
identifier[extver] = identifier[self] . identifier[_image] [ identifier[i] ]. identifier[header] [ literal[string] ]
keyword[else] :
identifier[extver] = literal[int]
keyword[if] identifier[hasattr] ( identifier[self] . identifier[_image] [ identifier[i] ], literal[string] ) keyword[and] literal[string] keyword[in] identifier[self] . identifier[_image] [ identifier[i] ]. identifier[_extension] :
keyword[if] ( identifier[self] . identifier[_image] [ identifier[i] ]. identifier[extname] keyword[in] identifier[extensions] ) keyword[and] identifier[self] . identifier[_image] [ identifier[self] . identifier[scienceExt] , identifier[extver] ]. identifier[group_member] :
identifier[chiplist] . identifier[append] ( identifier[self] . identifier[_image] [ identifier[i] ])
keyword[return] identifier[chiplist] | def returnAllChips(self, extname=None, exclude=None):
""" Returns a list containing all the chips which match the
extname given minus those specified for exclusion (if any).
"""
extensions = self._findExtnames(extname=extname, exclude=exclude)
chiplist = []
for i in range(1, self._nextend + 1, 1):
if 'extver' in self._image[i].header:
extver = self._image[i].header['extver'] # depends on [control=['if'], data=[]]
else:
extver = 1
if hasattr(self._image[i], '_extension') and 'IMAGE' in self._image[i]._extension:
if self._image[i].extname in extensions and self._image[self.scienceExt, extver].group_member:
chiplist.append(self._image[i]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return chiplist |
def get_first_defined(data, keys, default_value=None):
    """
    Return the value for the first key of *keys* that is present in *data*.

    :param data: mapping to look the keys up in
    :param keys: iterable of candidate keys, probed in order
    :param default_value: value returned when none of the keys is present
    :return: ``data[key]`` for the first matching key, else *default_value*
    """
    # Lazily probe the keys in order; next() stops at the first hit.
    return next((data[key] for key in keys if key in data), default_value)
constant[
Get the first defined key in data.
:param data:
:type data:
:param keys:
:type keys:
:param default_value:
:type default_value:
:return:
:rtype:
]
for taget[name[key]] in starred[name[keys]] begin[:]
if compare[name[key] in name[data]] begin[:]
return[call[name[data]][name[key]]]
return[name[default_value]] | keyword[def] identifier[get_first_defined] ( identifier[data] , identifier[keys] , identifier[default_value] = keyword[None] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[keys] :
keyword[if] identifier[key] keyword[in] identifier[data] :
keyword[return] identifier[data] [ identifier[key] ]
keyword[return] identifier[default_value] | def get_first_defined(data, keys, default_value=None):
"""
Get the first defined key in data.
:param data:
:type data:
:param keys:
:type keys:
:param default_value:
:type default_value:
:return:
:rtype:
"""
for key in keys:
if key in data:
return data[key] # depends on [control=['if'], data=['key', 'data']] # depends on [control=['for'], data=['key']]
return default_value |
def remove_vertex(self, vertex):
    """
    Remove vertex from G
    """
    try:
        self.vertices.pop(vertex)
    except KeyError:
        raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
    # Drop any node payload attached to the vertex (no-op if absent).
    self.nodes.pop(vertex, None)
    # Strip the vertex out of every remaining adjacency list.
    for neighbours in self.vertices.values():
        if vertex in neighbours:
            neighbours.remove(vertex)
    # Collect affected edge keys first so the edge mapping is never
    # mutated while it is being iterated.
    doomed = [edge for edge in self.edges if vertex in edge]
    for edge in doomed:
        del self.edges[edge]
constant[
Remove vertex from G
]
<ast.Try object at 0x7da204344070>
if compare[name[vertex] in name[self].nodes] begin[:]
call[name[self].nodes.pop, parameter[name[vertex]]]
for taget[name[element]] in starred[name[self].vertices] begin[:]
if compare[name[vertex] in call[name[self].vertices][name[element]]] begin[:]
call[call[name[self].vertices][name[element]].remove, parameter[name[vertex]]]
variable[edges] assign[=] list[[]]
for taget[name[element]] in starred[name[self].edges] begin[:]
if compare[name[vertex] in name[element]] begin[:]
call[name[edges].append, parameter[name[element]]]
for taget[name[element]] in starred[name[edges]] begin[:]
<ast.Delete object at 0x7da1b14c5ed0> | keyword[def] identifier[remove_vertex] ( identifier[self] , identifier[vertex] ):
literal[string]
keyword[try] :
identifier[self] . identifier[vertices] . identifier[pop] ( identifier[vertex] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[GraphInsertError] ( literal[string] %( identifier[vertex] ,))
keyword[if] identifier[vertex] keyword[in] identifier[self] . identifier[nodes] :
identifier[self] . identifier[nodes] . identifier[pop] ( identifier[vertex] )
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[vertices] :
keyword[if] identifier[vertex] keyword[in] identifier[self] . identifier[vertices] [ identifier[element] ]:
identifier[self] . identifier[vertices] [ identifier[element] ]. identifier[remove] ( identifier[vertex] )
identifier[edges] =[]
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[edges] :
keyword[if] identifier[vertex] keyword[in] identifier[element] :
identifier[edges] . identifier[append] ( identifier[element] )
keyword[for] identifier[element] keyword[in] identifier[edges] :
keyword[del] identifier[self] . identifier[edges] [ identifier[element] ] | def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex) # depends on [control=['try'], data=[]]
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,)) # depends on [control=['except'], data=[]]
if vertex in self.nodes:
self.nodes.pop(vertex) # depends on [control=['if'], data=['vertex']]
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex) # depends on [control=['if'], data=['vertex']] # depends on [control=['for'], data=['element']]
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element) # depends on [control=['if'], data=['element']] # depends on [control=['for'], data=['element']]
for element in edges:
del self.edges[element] # depends on [control=['for'], data=['element']] |
def __create_device(self):
    """
    creates device, internal use only

    Builds and persists a ``Device`` record from the netengine device
    dictionary, together with its routing protocols, interfaces, VAPs,
    IP addresses and (when the hardware app is installed) its device
    model / manufacturer records.

    :return: the saved ``Device`` instance
    """
    # retrieve netengine dictionary from memory or from network
    device_dict = getattr(self, 'netengine_dict', self.netengine.to_dict())
    device = Device()
    device.node_id = self.node_id
    device.name = device_dict['name']
    device.type = device_dict['type']
    device.status = DEVICE_STATUS.get('reachable')
    device.os = device_dict['os']
    device.os_version = device_dict['os_version']
    # this is the first time the device is seen by the system because we are just adding it
    device.first_seen = now()
    # and is also the latest
    device.last_seen = now()
    device.full_clean()
    device.save()
    # add routing protocols
    for routing_protocol in device_dict['routing_protocols']:
        # retrieve routing protocol from DB
        try:
            rp = RoutingProtocol.objects.filter(
                name__iexact=routing_protocol['name'],
                version__iexact=routing_protocol['version']
            )[0]
        # create if doesn't exist yet
        except IndexError:
            rp = RoutingProtocol(
                name=routing_protocol['name'],
                version=routing_protocol['version']
            )
            rp.full_clean()
            rp.save()
        # add to device
        device.routing_protocols.add(rp)
    for interface in device_dict['interfaces']:
        # False doubles as a "nothing created yet" sentinel for both objects
        interface_object = False
        vap_object = False
        # create interface depending on type
        if interface['type'] == 'ethernet':
            interface_object = Ethernet(**{
                'device': device,
                'name': interface['name'],
                'mac': interface['mac_address'],
                'mtu': interface['mtu'],
                'standard': interface['standard'],
                'duplex': interface['duplex'],
                'tx_rate': interface['tx_rate'],
                'rx_rate': interface['rx_rate']
            })
        elif interface['type'] == 'wireless':
            interface_object = Wireless(**{
                'device': device,
                'name': interface['name'],
                'mac': interface['mac_address'],
                'mtu': interface['mtu'],
                'mode': interface['mode'],
                'standard': interface['standard'],
                'channel': interface['channel'],
                'channel_width': interface['channel_width'],
                'output_power': interface['output_power'],
                'dbm': interface['dbm'],
                'noise': interface['noise'],
                'tx_rate': interface['tx_rate'],
                'rx_rate': interface['rx_rate']
            })
            # NOTE(review): vap_object is overwritten on every iteration and
            # saved only once below, so only the *last* VAP of the interface
            # is persisted — confirm whether all VAPs should be saved.
            for vap in interface['vap']:
                vap_object = Vap(
                    essid=vap['essid'],
                    bssid=vap['bssid'],
                    encryption=vap['encryption']
                )
        # persist only if a known interface type was matched above
        if interface_object:
            interface_object.full_clean()
            interface_object.save()
            if vap_object:
                vap_object.interface = interface_object
                vap_object.full_clean()
                vap_object.save()
            # attach every reported IP address to the interface
            for ip in interface['ip']:
                ip_object = Ip(**{
                    'interface': interface_object,
                    'address': ip['address'],
                })
                ip_object.full_clean()
                ip_object.save()
    if HARDWARE_INSTALLED:
        # try getting device model from db
        try:
            device_model = DeviceModel.objects.filter(name__iexact=device_dict['model'])[0]
        # if it does not exist create it
        except IndexError as e:
            # try getting manufacturer from DB
            try:
                manufacturer = Manufacturer.objects.filter(name__iexact=device_dict['manufacturer'])[0]
            # or create
            except IndexError as e:
                manufacturer = Manufacturer(name=device_dict['manufacturer'])
                manufacturer.full_clean()
                manufacturer.save()
            device_model = DeviceModel(
                manufacturer=manufacturer,
                name=device_dict['model']
            )
            # RAM is only set on newly created models, not refreshed on
            # existing ones
            device_model.ram = device_dict['RAM_total']
        device_model.full_clean()
        device_model.save()
        # create relation between device model and device
        rel = DeviceToModelRel(device=device, model=device_model)
        rel.full_clean()
        rel.save()
    return device
constant[
creates device, internal use only
]
variable[device_dict] assign[=] call[name[getattr], parameter[name[self], constant[netengine_dict], call[name[self].netengine.to_dict, parameter[]]]]
variable[device] assign[=] call[name[Device], parameter[]]
name[device].node_id assign[=] name[self].node_id
name[device].name assign[=] call[name[device_dict]][constant[name]]
name[device].type assign[=] call[name[device_dict]][constant[type]]
name[device].status assign[=] call[name[DEVICE_STATUS].get, parameter[constant[reachable]]]
name[device].os assign[=] call[name[device_dict]][constant[os]]
name[device].os_version assign[=] call[name[device_dict]][constant[os_version]]
name[device].first_seen assign[=] call[name[now], parameter[]]
name[device].last_seen assign[=] call[name[now], parameter[]]
call[name[device].full_clean, parameter[]]
call[name[device].save, parameter[]]
for taget[name[routing_protocol]] in starred[call[name[device_dict]][constant[routing_protocols]]] begin[:]
<ast.Try object at 0x7da20c76dae0>
call[name[device].routing_protocols.add, parameter[name[rp]]]
for taget[name[interface]] in starred[call[name[device_dict]][constant[interfaces]]] begin[:]
variable[interface_object] assign[=] constant[False]
variable[vap_object] assign[=] constant[False]
if compare[call[name[interface]][constant[type]] equal[==] constant[ethernet]] begin[:]
variable[interface_object] assign[=] call[name[Ethernet], parameter[]]
if name[interface_object] begin[:]
call[name[interface_object].full_clean, parameter[]]
call[name[interface_object].save, parameter[]]
if name[vap_object] begin[:]
name[vap_object].interface assign[=] name[interface_object]
call[name[vap_object].full_clean, parameter[]]
call[name[vap_object].save, parameter[]]
for taget[name[ip]] in starred[call[name[interface]][constant[ip]]] begin[:]
variable[ip_object] assign[=] call[name[Ip], parameter[]]
call[name[ip_object].full_clean, parameter[]]
call[name[ip_object].save, parameter[]]
if name[HARDWARE_INSTALLED] begin[:]
<ast.Try object at 0x7da20c7c8d60>
call[name[device_model].full_clean, parameter[]]
call[name[device_model].save, parameter[]]
variable[rel] assign[=] call[name[DeviceToModelRel], parameter[]]
call[name[rel].full_clean, parameter[]]
call[name[rel].save, parameter[]]
return[name[device]] | keyword[def] identifier[__create_device] ( identifier[self] ):
literal[string]
identifier[device_dict] = identifier[getattr] ( identifier[self] , literal[string] , identifier[self] . identifier[netengine] . identifier[to_dict] ())
identifier[device] = identifier[Device] ()
identifier[device] . identifier[node_id] = identifier[self] . identifier[node_id]
identifier[device] . identifier[name] = identifier[device_dict] [ literal[string] ]
identifier[device] . identifier[type] = identifier[device_dict] [ literal[string] ]
identifier[device] . identifier[status] = identifier[DEVICE_STATUS] . identifier[get] ( literal[string] )
identifier[device] . identifier[os] = identifier[device_dict] [ literal[string] ]
identifier[device] . identifier[os_version] = identifier[device_dict] [ literal[string] ]
identifier[device] . identifier[first_seen] = identifier[now] ()
identifier[device] . identifier[last_seen] = identifier[now] ()
identifier[device] . identifier[full_clean] ()
identifier[device] . identifier[save] ()
keyword[for] identifier[routing_protocol] keyword[in] identifier[device_dict] [ literal[string] ]:
keyword[try] :
identifier[rp] = identifier[RoutingProtocol] . identifier[objects] . identifier[filter] (
identifier[name__iexact] = identifier[routing_protocol] [ literal[string] ],
identifier[version__iexact] = identifier[routing_protocol] [ literal[string] ]
)[ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[rp] = identifier[RoutingProtocol] (
identifier[name] = identifier[routing_protocol] [ literal[string] ],
identifier[version] = identifier[routing_protocol] [ literal[string] ]
)
identifier[rp] . identifier[full_clean] ()
identifier[rp] . identifier[save] ()
identifier[device] . identifier[routing_protocols] . identifier[add] ( identifier[rp] )
keyword[for] identifier[interface] keyword[in] identifier[device_dict] [ literal[string] ]:
identifier[interface_object] = keyword[False]
identifier[vap_object] = keyword[False]
keyword[if] identifier[interface] [ literal[string] ]== literal[string] :
identifier[interface_object] = identifier[Ethernet] (**{
literal[string] : identifier[device] ,
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ]
})
keyword[elif] identifier[interface] [ literal[string] ]== literal[string] :
identifier[interface_object] = identifier[Wireless] (**{
literal[string] : identifier[device] ,
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ],
literal[string] : identifier[interface] [ literal[string] ]
})
keyword[for] identifier[vap] keyword[in] identifier[interface] [ literal[string] ]:
identifier[vap_object] = identifier[Vap] (
identifier[essid] = identifier[vap] [ literal[string] ],
identifier[bssid] = identifier[vap] [ literal[string] ],
identifier[encryption] = identifier[vap] [ literal[string] ]
)
keyword[if] identifier[interface_object] :
identifier[interface_object] . identifier[full_clean] ()
identifier[interface_object] . identifier[save] ()
keyword[if] identifier[vap_object] :
identifier[vap_object] . identifier[interface] = identifier[interface_object]
identifier[vap_object] . identifier[full_clean] ()
identifier[vap_object] . identifier[save] ()
keyword[for] identifier[ip] keyword[in] identifier[interface] [ literal[string] ]:
identifier[ip_object] = identifier[Ip] (**{
literal[string] : identifier[interface_object] ,
literal[string] : identifier[ip] [ literal[string] ],
})
identifier[ip_object] . identifier[full_clean] ()
identifier[ip_object] . identifier[save] ()
keyword[if] identifier[HARDWARE_INSTALLED] :
keyword[try] :
identifier[device_model] = identifier[DeviceModel] . identifier[objects] . identifier[filter] ( identifier[name__iexact] = identifier[device_dict] [ literal[string] ])[ literal[int] ]
keyword[except] identifier[IndexError] keyword[as] identifier[e] :
keyword[try] :
identifier[manufacturer] = identifier[Manufacturer] . identifier[objects] . identifier[filter] ( identifier[name__iexact] = identifier[device_dict] [ literal[string] ])[ literal[int] ]
keyword[except] identifier[IndexError] keyword[as] identifier[e] :
identifier[manufacturer] = identifier[Manufacturer] ( identifier[name] = identifier[device_dict] [ literal[string] ])
identifier[manufacturer] . identifier[full_clean] ()
identifier[manufacturer] . identifier[save] ()
identifier[device_model] = identifier[DeviceModel] (
identifier[manufacturer] = identifier[manufacturer] ,
identifier[name] = identifier[device_dict] [ literal[string] ]
)
identifier[device_model] . identifier[ram] = identifier[device_dict] [ literal[string] ]
identifier[device_model] . identifier[full_clean] ()
identifier[device_model] . identifier[save] ()
identifier[rel] = identifier[DeviceToModelRel] ( identifier[device] = identifier[device] , identifier[model] = identifier[device_model] )
identifier[rel] . identifier[full_clean] ()
identifier[rel] . identifier[save] ()
keyword[return] identifier[device] | def __create_device(self):
"""
creates device, internal use only
"""
# retrieve netengine dictionary from memory or from network
device_dict = getattr(self, 'netengine_dict', self.netengine.to_dict())
device = Device()
device.node_id = self.node_id
device.name = device_dict['name']
device.type = device_dict['type']
device.status = DEVICE_STATUS.get('reachable')
device.os = device_dict['os']
device.os_version = device_dict['os_version']
# this is the first time the device is seen by the system because we are just adding it
device.first_seen = now()
# and is also the latest
device.last_seen = now()
device.full_clean()
device.save()
# add routing protocols
for routing_protocol in device_dict['routing_protocols']:
# retrieve routing protocol from DB
try:
rp = RoutingProtocol.objects.filter(name__iexact=routing_protocol['name'], version__iexact=routing_protocol['version'])[0] # depends on [control=['try'], data=[]]
# create if doesn't exist yet
except IndexError:
rp = RoutingProtocol(name=routing_protocol['name'], version=routing_protocol['version'])
rp.full_clean()
rp.save() # depends on [control=['except'], data=[]]
# add to device
device.routing_protocols.add(rp) # depends on [control=['for'], data=['routing_protocol']]
for interface in device_dict['interfaces']:
interface_object = False
vap_object = False
# create interface depending on type
if interface['type'] == 'ethernet':
interface_object = Ethernet(**{'device': device, 'name': interface['name'], 'mac': interface['mac_address'], 'mtu': interface['mtu'], 'standard': interface['standard'], 'duplex': interface['duplex'], 'tx_rate': interface['tx_rate'], 'rx_rate': interface['rx_rate']}) # depends on [control=['if'], data=[]]
elif interface['type'] == 'wireless':
interface_object = Wireless(**{'device': device, 'name': interface['name'], 'mac': interface['mac_address'], 'mtu': interface['mtu'], 'mode': interface['mode'], 'standard': interface['standard'], 'channel': interface['channel'], 'channel_width': interface['channel_width'], 'output_power': interface['output_power'], 'dbm': interface['dbm'], 'noise': interface['noise'], 'tx_rate': interface['tx_rate'], 'rx_rate': interface['rx_rate']})
for vap in interface['vap']:
vap_object = Vap(essid=vap['essid'], bssid=vap['bssid'], encryption=vap['encryption']) # depends on [control=['for'], data=['vap']] # depends on [control=['if'], data=[]]
if interface_object:
interface_object.full_clean()
interface_object.save()
if vap_object:
vap_object.interface = interface_object
vap_object.full_clean()
vap_object.save() # depends on [control=['if'], data=[]]
for ip in interface['ip']:
ip_object = Ip(**{'interface': interface_object, 'address': ip['address']})
ip_object.full_clean()
ip_object.save() # depends on [control=['for'], data=['ip']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['interface']]
if HARDWARE_INSTALLED:
# try getting device model from db
try:
device_model = DeviceModel.objects.filter(name__iexact=device_dict['model'])[0] # depends on [control=['try'], data=[]]
# if it does not exist create it
except IndexError as e:
# try getting manufacturer from DB
try:
manufacturer = Manufacturer.objects.filter(name__iexact=device_dict['manufacturer'])[0] # depends on [control=['try'], data=[]]
# or create
except IndexError as e:
manufacturer = Manufacturer(name=device_dict['manufacturer'])
manufacturer.full_clean()
manufacturer.save() # depends on [control=['except'], data=[]]
device_model = DeviceModel(manufacturer=manufacturer, name=device_dict['model'])
device_model.ram = device_dict['RAM_total'] # depends on [control=['except'], data=[]]
device_model.full_clean()
device_model.save()
# create relation between device model and device
rel = DeviceToModelRel(device=device, model=device_model)
rel.full_clean()
rel.save() # depends on [control=['if'], data=[]]
return device |
def recipients(self):
    '''A list of all recipients for this message.
    '''
    # _cc / _bcc may be None; substitute empty lists before concatenating.
    return self._to + (self._cc or []) + (self._bcc or [])
constant[A list of all recipients for this message.
]
variable[cc] assign[=] <ast.BoolOp object at 0x7da18eb552d0>
variable[bcc] assign[=] <ast.BoolOp object at 0x7da20c7947f0>
return[binary_operation[binary_operation[name[self]._to + name[cc]] + name[bcc]]] | keyword[def] identifier[recipients] ( identifier[self] ):
literal[string]
identifier[cc] = identifier[self] . identifier[_cc] keyword[or] []
identifier[bcc] = identifier[self] . identifier[_bcc] keyword[or] []
keyword[return] identifier[self] . identifier[_to] + identifier[cc] + identifier[bcc] | def recipients(self):
"""A list of all recipients for this message.
"""
cc = self._cc or []
bcc = self._bcc or []
return self._to + cc + bcc |
def debug_flag():
    """Set logging level for entry points."""
    logging.basicConfig(format=consts.LOGGING_FORMAT)
    parser = argparse.ArgumentParser(description=debug_flag.__doc__)
    add_debug(parser)
    args, _unknown = parser.parse_known_args()
    # The logger is keyed on the top-level package name only.
    root_package = __package__.split('.')[0]
    logging.getLogger(root_package).setLevel(args.debug)
constant[Set logging level for entry points.]
call[name[logging].basicConfig, parameter[]]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[add_debug], parameter[name[parser]]]
<ast.Tuple object at 0x7da20c992fb0> assign[=] call[name[parser].parse_known_args, parameter[]]
<ast.Tuple object at 0x7da20c9936d0> assign[=] call[name[__package__].split, parameter[constant[.]]]
call[call[name[logging].getLogger, parameter[name[package]]].setLevel, parameter[name[args].debug]] | keyword[def] identifier[debug_flag] ():
literal[string]
identifier[logging] . identifier[basicConfig] ( identifier[format] = identifier[consts] . identifier[LOGGING_FORMAT] )
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[debug_flag] . identifier[__doc__] )
identifier[add_debug] ( identifier[parser] )
identifier[args] , identifier[_extra_args] = identifier[parser] . identifier[parse_known_args] ()
identifier[package] ,* identifier[_] = identifier[__package__] . identifier[split] ( literal[string] )
identifier[logging] . identifier[getLogger] ( identifier[package] ). identifier[setLevel] ( identifier[args] . identifier[debug] ) | def debug_flag():
"""Set logging level for entry points."""
logging.basicConfig(format=consts.LOGGING_FORMAT)
parser = argparse.ArgumentParser(description=debug_flag.__doc__)
add_debug(parser)
(args, _extra_args) = parser.parse_known_args()
(package, *_) = __package__.split('.')
logging.getLogger(package).setLevel(args.debug) |
def execute_operation(self, method="GET", ops_path="", payload=""):
    """
    Executes a Kubernetes operation using the specified method against a path.
    This is part of the low-level API.
    :Parameters:
       - `method`: The HTTP method to use, defaults to `GET`
       - `ops_path`: The path of the operation, for example, `/api/v1/events` which would result in an overall: `GET http://localhost:8080/api/v1/events`
       - `payload`: The optional payload which is relevant for `POST` or `PUT` methods only
    :return: the `requests` response object
    """
    operation_path_URL = "".join([self.api_server, ops_path])
    # Pass %-args lazily so the message is only formatted when DEBUG
    # logging is actually enabled.
    logging.debug("%s %s", method, operation_path_URL)
    if payload == "":
        res = requests.request(method, operation_path_URL)
    else:
        logging.debug("PAYLOAD:\n%s", payload)
        res = requests.request(method, operation_path_URL, data=payload)
    # Only decode the body when DEBUG is on, and never let a non-JSON
    # response body turn the debug log into a crash.
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        try:
            logging.debug("RESPONSE:\n%s", res.json())
        except ValueError:
            logging.debug("RESPONSE (non-JSON):\n%s", res.text)
    return res
constant[
Executes a Kubernetes operation using the specified method against a path.
This is part of the low-level API.
:Parameters:
- `method`: The HTTP method to use, defaults to `GET`
- `ops_path`: The path of the operation, for example, `/api/v1/events` which would result in an overall: `GET http://localhost:8080/api/v1/events`
- `payload`: The optional payload which is relevant for `POST` or `PUT` methods only
]
variable[operation_path_URL] assign[=] call[constant[].join, parameter[list[[<ast.Attribute object at 0x7da18f58f5b0>, <ast.Name object at 0x7da18f58d8d0>]]]]
call[name[logging].debug, parameter[binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f58fd60>, <ast.Name object at 0x7da18f58e980>]]]]]
if compare[name[payload] equal[==] constant[]] begin[:]
variable[res] assign[=] call[name[requests].request, parameter[name[method], name[operation_path_URL]]]
call[name[logging].debug, parameter[binary_operation[constant[RESPONSE:
%s] <ast.Mod object at 0x7da2590d6920> call[name[res].json, parameter[]]]]]
return[name[res]] | keyword[def] identifier[execute_operation] ( identifier[self] , identifier[method] = literal[string] , identifier[ops_path] = literal[string] , identifier[payload] = literal[string] ):
literal[string]
identifier[operation_path_URL] = literal[string] . identifier[join] ([ identifier[self] . identifier[api_server] , identifier[ops_path] ])
identifier[logging] . identifier[debug] ( literal[string] %( identifier[method] , identifier[operation_path_URL] ))
keyword[if] identifier[payload] == literal[string] :
identifier[res] = identifier[requests] . identifier[request] ( identifier[method] , identifier[operation_path_URL] )
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] %( identifier[payload] ))
identifier[res] = identifier[requests] . identifier[request] ( identifier[method] , identifier[operation_path_URL] , identifier[data] = identifier[payload] )
identifier[logging] . identifier[debug] ( literal[string] %( identifier[res] . identifier[json] ()))
keyword[return] identifier[res] | def execute_operation(self, method='GET', ops_path='', payload=''):
"""
Executes a Kubernetes operation using the specified method against a path.
This is part of the low-level API.
:Parameters:
- `method`: The HTTP method to use, defaults to `GET`
- `ops_path`: The path of the operation, for example, `/api/v1/events` which would result in an overall: `GET http://localhost:8080/api/v1/events`
- `payload`: The optional payload which is relevant for `POST` or `PUT` methods only
"""
operation_path_URL = ''.join([self.api_server, ops_path])
logging.debug('%s %s' % (method, operation_path_URL))
if payload == '':
res = requests.request(method, operation_path_URL) # depends on [control=['if'], data=[]]
else:
logging.debug('PAYLOAD:\n%s' % payload)
res = requests.request(method, operation_path_URL, data=payload)
logging.debug('RESPONSE:\n%s' % res.json())
return res |
def update_url(self, url=None):
    """
    Accepts a fully-qualified url.
    Returns True if successful, False if not successful.
    """
    if not url:
        raise ValueError("Neither a url or regex was provided to update_url.")
    response = self.session.post("%s%s" % (self.BASE_URL, url))
    # Anything below the 5xx range counts as success.
    return int(response.status_code) < 500
constant[
Accepts a fully-qualified url.
Returns True if successful, False if not successful.
]
if <ast.UnaryOp object at 0x7da1b1b037f0> begin[:]
<ast.Raise object at 0x7da1b1b03910>
variable[post_url] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1b01030>, <ast.Name object at 0x7da1b1b039a0>]]]
variable[r] assign[=] call[name[self].session.post, parameter[name[post_url]]]
return[compare[call[name[int], parameter[name[r].status_code]] less[<] constant[500]]] | keyword[def] identifier[update_url] ( identifier[self] , identifier[url] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[url] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[post_url] = literal[string] %( identifier[self] . identifier[BASE_URL] , identifier[url] )
identifier[r] = identifier[self] . identifier[session] . identifier[post] ( identifier[post_url] )
keyword[return] identifier[int] ( identifier[r] . identifier[status_code] )< literal[int] | def update_url(self, url=None):
"""
Accepts a fully-qualified url.
Returns True if successful, False if not successful.
"""
if not url:
raise ValueError('Neither a url or regex was provided to update_url.') # depends on [control=['if'], data=[]]
post_url = '%s%s' % (self.BASE_URL, url)
r = self.session.post(post_url)
return int(r.status_code) < 500 |
def set_gateway(self, gateway):
    '''
    :param crabpy.gateway.capakey.CapakeyGateway gateway: Gateway to use.
    '''
    # Store the gateway locally and propagate it to the child `sectie`
    # object so the whole hierarchy resolves through the same gateway.
    self.gateway = gateway
    self.sectie.set_gateway(gateway)
constant[
:param crabpy.gateway.capakey.CapakeyGateway gateway: Gateway to use.
]
name[self].gateway assign[=] name[gateway]
call[name[self].sectie.set_gateway, parameter[name[gateway]]] | keyword[def] identifier[set_gateway] ( identifier[self] , identifier[gateway] ):
literal[string]
identifier[self] . identifier[gateway] = identifier[gateway]
identifier[self] . identifier[sectie] . identifier[set_gateway] ( identifier[gateway] ) | def set_gateway(self, gateway):
"""
:param crabpy.gateway.capakey.CapakeyGateway gateway: Gateway to use.
"""
self.gateway = gateway
self.sectie.set_gateway(gateway) |
async def message_field(self, msg, field, fvalue=None):
    """
    Dumps/Loads message field
    :param msg: message instance being serialized / deserialized
    :param field: field specification; the first element is the field name
    :param fvalue: explicit value for dump
    :return:
    """
    # Only the field name is needed here; the previously unpacked
    # ftype / params locals were never used.
    fname = field[0]
    try:
        self.tracker.push_field(fname)
        if self.writing:
            await self._dump_message_field(self.iobj, msg, field, fvalue=fvalue)
        else:
            await self._load_message_field(self.iobj, msg, field)
        self.tracker.pop()
    except Exception as e:
        # Wrap any failure with the tracker so the error reports the
        # exact field path being processed.
        raise helpers.ArchiveException(e, tracker=self.tracker) from e
literal[string]
identifier[fname] , identifier[ftype] , identifier[params] = identifier[field] [ literal[int] ], identifier[field] [ literal[int] ], identifier[field] [ literal[int] :]
keyword[try] :
identifier[self] . identifier[tracker] . identifier[push_field] ( identifier[fname] )
keyword[if] identifier[self] . identifier[writing] :
keyword[await] identifier[self] . identifier[_dump_message_field] ( identifier[self] . identifier[iobj] , identifier[msg] , identifier[field] , identifier[fvalue] = identifier[fvalue] )
keyword[else] :
keyword[await] identifier[self] . identifier[_load_message_field] ( identifier[self] . identifier[iobj] , identifier[msg] , identifier[field] )
identifier[self] . identifier[tracker] . identifier[pop] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[helpers] . identifier[ArchiveException] ( identifier[e] , identifier[tracker] = identifier[self] . identifier[tracker] ) keyword[from] identifier[e] | async def message_field(self, msg, field, fvalue=None):
"""
Dumps/Loads message field
:param msg:
:param field:
:param fvalue: explicit value for dump
:return:
"""
(fname, ftype, params) = (field[0], field[1], field[2:])
try:
self.tracker.push_field(fname)
if self.writing:
await self._dump_message_field(self.iobj, msg, field, fvalue=fvalue) # depends on [control=['if'], data=[]]
else:
await self._load_message_field(self.iobj, msg, field)
self.tracker.pop() # depends on [control=['try'], data=[]]
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e # depends on [control=['except'], data=['e']] |
def generate_sub_codons_left(codons_dict):
    """Build the 5' sub-codon (prefix) dictionary for each 'amino acid' group.

    Parameters
    ----------
    codons_dict : dict
        Maps each allowed 'amino acid' symbol to the list of codons that
        encode it.

    Returns
    -------
    dict
        Maps each symbol to the deduplicated 1- and 2-nucleotide prefixes
        (read from the 5' end) of its codons.
    """
    # Single-nucleotide prefixes first, then dinucleotide prefixes, dropped
    # into a set to deduplicate before listing.
    return {
        symbol: list(set([codon[0] for codon in codons] +
                         [codon[:2] for codon in codons]))
        for symbol, codons in codons_dict.items()
    }
constant[Generate the sub_codons_left dictionary of codon prefixes.
Parameters
----------
codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
Returns
-------
sub_codons_left : dict
Dictionary of the 1 and 2 nucleotide prefixes (read from 5') for
each codon in an 'amino acid' grouping
]
variable[sub_codons_left] assign[=] dictionary[[], []]
for taget[name[aa]] in starred[call[name[codons_dict].keys, parameter[]]] begin[:]
call[name[sub_codons_left]][name[aa]] assign[=] call[name[list], parameter[call[name[set], parameter[binary_operation[<ast.ListComp object at 0x7da20cabf910> + <ast.ListComp object at 0x7da20cabdb10>]]]]]
return[name[sub_codons_left]] | keyword[def] identifier[generate_sub_codons_left] ( identifier[codons_dict] ):
literal[string]
identifier[sub_codons_left] ={}
keyword[for] identifier[aa] keyword[in] identifier[codons_dict] . identifier[keys] ():
identifier[sub_codons_left] [ identifier[aa] ]= identifier[list] ( identifier[set] ([ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[codons_dict] [ identifier[aa] ]]+[ identifier[x] [: literal[int] ] keyword[for] identifier[x] keyword[in] identifier[codons_dict] [ identifier[aa] ]]))
keyword[return] identifier[sub_codons_left] | def generate_sub_codons_left(codons_dict):
"""Generate the sub_codons_left dictionary of codon prefixes.
Parameters
----------
codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
Returns
-------
sub_codons_left : dict
Dictionary of the 1 and 2 nucleotide prefixes (read from 5') for
each codon in an 'amino acid' grouping
"""
sub_codons_left = {}
for aa in codons_dict.keys():
sub_codons_left[aa] = list(set([x[0] for x in codons_dict[aa]] + [x[:2] for x in codons_dict[aa]])) # depends on [control=['for'], data=['aa']]
return sub_codons_left |
def _boosted_value(name, action, key, value, boost):
"""Boost a value if we should in _process_queries"""
if boost is not None:
# Note: Most queries use 'value' for the key name except
# Match queries which use 'query'. So we have to do some
# switcheroo for that.
value_key = 'query' if action in MATCH_ACTIONS else 'value'
return {name: {'boost': boost, value_key: value}}
return {name: value} | def function[_boosted_value, parameter[name, action, key, value, boost]]:
constant[Boost a value if we should in _process_queries]
if compare[name[boost] is_not constant[None]] begin[:]
variable[value_key] assign[=] <ast.IfExp object at 0x7da1b10a4c70>
return[dictionary[[<ast.Name object at 0x7da1b10a7d60>], [<ast.Dict object at 0x7da1b10a4760>]]]
return[dictionary[[<ast.Name object at 0x7da1b0f2a4d0>], [<ast.Name object at 0x7da1b0f2a8f0>]]] | keyword[def] identifier[_boosted_value] ( identifier[name] , identifier[action] , identifier[key] , identifier[value] , identifier[boost] ):
literal[string]
keyword[if] identifier[boost] keyword[is] keyword[not] keyword[None] :
identifier[value_key] = literal[string] keyword[if] identifier[action] keyword[in] identifier[MATCH_ACTIONS] keyword[else] literal[string]
keyword[return] { identifier[name] :{ literal[string] : identifier[boost] , identifier[value_key] : identifier[value] }}
keyword[return] { identifier[name] : identifier[value] } | def _boosted_value(name, action, key, value, boost):
"""Boost a value if we should in _process_queries"""
if boost is not None:
# Note: Most queries use 'value' for the key name except
# Match queries which use 'query'. So we have to do some
# switcheroo for that.
value_key = 'query' if action in MATCH_ACTIONS else 'value'
return {name: {'boost': boost, value_key: value}} # depends on [control=['if'], data=['boost']]
return {name: value} |
def get_writer_position(self, name):
    """Return the last recorded writer position for *name*.

    Args:
        name: writer identifier (the ``id`` column of
            ``gauged_writer_history``).

    Returns:
        The stored timestamp, or 0 when no history row exists yet.
    """
    # Grab the cursor once in case ``self.cursor`` is a property.
    cur = self.cursor
    cur.execute(
        'SELECT timestamp FROM gauged_writer_history WHERE id = %s',
        (name,))
    row = cur.fetchone()
    if not row:
        return 0
    return row[0]
constant[Get the current writer position]
variable[cursor] assign[=] name[self].cursor
call[name[cursor].execute, parameter[constant[SELECT timestamp FROM gauged_writer_history WHERE id = %s], tuple[[<ast.Name object at 0x7da1b24ad7e0>]]]]
variable[result] assign[=] call[name[cursor].fetchone, parameter[]]
return[<ast.IfExp object at 0x7da1b24af0d0>] | keyword[def] identifier[get_writer_position] ( identifier[self] , identifier[name] ):
literal[string]
identifier[cursor] = identifier[self] . identifier[cursor]
identifier[cursor] . identifier[execute] ( literal[string]
literal[string] ,( identifier[name] ,))
identifier[result] = identifier[cursor] . identifier[fetchone] ()
keyword[return] identifier[result] [ literal[int] ] keyword[if] identifier[result] keyword[else] literal[int] | def get_writer_position(self, name):
"""Get the current writer position"""
cursor = self.cursor
cursor.execute('SELECT timestamp FROM gauged_writer_history WHERE id = %s', (name,))
result = cursor.fetchone()
return result[0] if result else 0 |
def shutdown(self, message=None):
    """Disconnect all servers with a message.

    Args:
        message (str): Quit message to use on each connection.
    """
    # Only the server objects are needed; iterate the values directly
    # instead of unpacking an unused key (lint: PERF102).
    for server in self.servers.values():
        server.quit(message)
constant[Disconnect all servers with a message.
Args:
message (str): Quit message to use on each connection.
]
for taget[tuple[[<ast.Name object at 0x7da2044c2d10>, <ast.Name object at 0x7da2044c1270>]]] in starred[call[name[self].servers.items, parameter[]]] begin[:]
call[name[server].quit, parameter[name[message]]] | keyword[def] identifier[shutdown] ( identifier[self] , identifier[message] = keyword[None] ):
literal[string]
keyword[for] identifier[name] , identifier[server] keyword[in] identifier[self] . identifier[servers] . identifier[items] ():
identifier[server] . identifier[quit] ( identifier[message] ) | def shutdown(self, message=None):
"""Disconnect all servers with a message.
Args:
message (str): Quit message to use on each connection.
"""
for (name, server) in self.servers.items():
server.quit(message) # depends on [control=['for'], data=[]] |
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Resolves the path specification to a TSK (SleuthKit) file entry,
  validates the pytsk3 metadata required for reading, and — when a named
  data stream is requested — locates the matching pytsk3 attribute.
  On success the resolved file system, file and attribute are stored on
  the instance together with the byte size of the selected stream.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    raise ValueError('Missing path specification.')

  # Name of an alternate data stream (e.g. an NTFS ADS or HFS resource
  # fork); None/empty selects the default data stream.
  data_stream = getattr(path_spec, 'data_stream', None)

  file_system = resolver.Resolver.OpenFileSystem(
      path_spec, resolver_context=self._resolver_context)

  file_entry = file_system.GetFileEntryByPathSpec(path_spec)
  if not file_entry:
    # Every error path below closes the file system so it is not leaked
    # into the resolver context.
    file_system.Close()
    raise IOError('Unable to retrieve file entry.')

  tsk_file = file_entry.GetTSKFile()
  tsk_attribute = None

  # Note that because pytsk3.File does not explicitly defines info
  # we need to check if the attribute exists and has a value other
  # than None.
  if getattr(tsk_file, 'info', None) is None:
    file_system.Close()
    raise IOError('Missing attribute info in file (pytsk3.File).')

  # Note that because pytsk3.TSK_FS_FILE does not explicitly defines meta
  # we need to check if the attribute exists and has a value other
  # than None.
  if getattr(tsk_file.info, 'meta', None) is None:
    file_system.Close()
    raise IOError(
        'Missing attribute meta in file.info pytsk3.TSK_FS_FILE).')

  # Note that because pytsk3.TSK_FS_META does not explicitly defines size
  # we need to check if the attribute exists.
  if not hasattr(tsk_file.info.meta, 'size'):
    file_system.Close()
    raise IOError(
        'Missing attribute size in file.info.meta (pytsk3.TSK_FS_META).')

  # Note that because pytsk3.TSK_FS_META does not explicitly defines type
  # we need to check if the attribute exists.
  if not hasattr(tsk_file.info.meta, 'type'):
    file_system.Close()
    raise IOError(
        'Missing attribute type in file.info.meta (pytsk3.TSK_FS_META).')

  if data_stream:
    # Iterating a pytsk3.File yields its attributes; scan for the data
    # attribute whose name matches the requested stream.
    for attribute in tsk_file:
      if getattr(attribute, 'info', None) is None:
        continue

      # The value of the attribute name will be None for the default
      # data stream.
      attribute_name = getattr(attribute.info, 'name', None)
      if attribute_name is None:
        attribute_name = ''

      else:
        try:
          # pytsk3 returns an UTF-8 encoded byte string.
          attribute_name = attribute_name.decode('utf8')
        except UnicodeError:
          # Continue here since we cannot represent the attribute name.
          continue

      attribute_type = getattr(attribute.info, 'type', None)
      # Only data-bearing attribute types qualify (HFS default/data and
      # NTFS $DATA); other attribute types are ignored.
      if attribute_name == data_stream and attribute_type in (
          pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT,
          pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA,
          pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA):
        tsk_attribute = attribute
        break

    if tsk_attribute is None:
      file_system.Close()
      raise IOError('Unable to open data stream: {0:s}.'.format(data_stream))

  # Without a matched attribute the entry itself is read, which only makes
  # sense for a regular file.
  if (not tsk_attribute and
      tsk_file.info.meta.type != pytsk3.TSK_FS_META_TYPE_REG):
    file_system.Close()
    raise IOError('Not a regular file.')

  self._current_offset = 0
  self._file_system = file_system
  self._tsk_attribute = tsk_attribute
  self._tsk_file = tsk_file

  # The stream size comes from the attribute when one was selected,
  # otherwise from the file metadata.
  if self._tsk_attribute:
    self._size = self._tsk_attribute.info.size
  else:
    self._size = self._tsk_file.info.meta.size
constant[Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
]
if <ast.UnaryOp object at 0x7da1b07ce140> begin[:]
<ast.Raise object at 0x7da1b07cc850>
variable[data_stream] assign[=] call[name[getattr], parameter[name[path_spec], constant[data_stream], constant[None]]]
variable[file_system] assign[=] call[name[resolver].Resolver.OpenFileSystem, parameter[name[path_spec]]]
variable[file_entry] assign[=] call[name[file_system].GetFileEntryByPathSpec, parameter[name[path_spec]]]
if <ast.UnaryOp object at 0x7da1b07cd060> begin[:]
call[name[file_system].Close, parameter[]]
<ast.Raise object at 0x7da1b07ceb60>
variable[tsk_file] assign[=] call[name[file_entry].GetTSKFile, parameter[]]
variable[tsk_attribute] assign[=] constant[None]
if compare[call[name[getattr], parameter[name[tsk_file], constant[info], constant[None]]] is constant[None]] begin[:]
call[name[file_system].Close, parameter[]]
<ast.Raise object at 0x7da1b06798a0>
if compare[call[name[getattr], parameter[name[tsk_file].info, constant[meta], constant[None]]] is constant[None]] begin[:]
call[name[file_system].Close, parameter[]]
<ast.Raise object at 0x7da1b06787f0>
if <ast.UnaryOp object at 0x7da1b0679fc0> begin[:]
call[name[file_system].Close, parameter[]]
<ast.Raise object at 0x7da1b067a050>
if <ast.UnaryOp object at 0x7da1b0678130> begin[:]
call[name[file_system].Close, parameter[]]
<ast.Raise object at 0x7da1b0678b50>
if name[data_stream] begin[:]
for taget[name[attribute]] in starred[name[tsk_file]] begin[:]
if compare[call[name[getattr], parameter[name[attribute], constant[info], constant[None]]] is constant[None]] begin[:]
continue
variable[attribute_name] assign[=] call[name[getattr], parameter[name[attribute].info, constant[name], constant[None]]]
if compare[name[attribute_name] is constant[None]] begin[:]
variable[attribute_name] assign[=] constant[]
variable[attribute_type] assign[=] call[name[getattr], parameter[name[attribute].info, constant[type], constant[None]]]
if <ast.BoolOp object at 0x7da1b067a1a0> begin[:]
variable[tsk_attribute] assign[=] name[attribute]
break
if compare[name[tsk_attribute] is constant[None]] begin[:]
call[name[file_system].Close, parameter[]]
<ast.Raise object at 0x7da1b06799c0>
if <ast.BoolOp object at 0x7da1b064e4a0> begin[:]
call[name[file_system].Close, parameter[]]
<ast.Raise object at 0x7da1b0679060>
name[self]._current_offset assign[=] constant[0]
name[self]._file_system assign[=] name[file_system]
name[self]._tsk_attribute assign[=] name[tsk_attribute]
name[self]._tsk_file assign[=] name[tsk_file]
if name[self]._tsk_attribute begin[:]
name[self]._size assign[=] name[self]._tsk_attribute.info.size | keyword[def] identifier[_Open] ( identifier[self] , identifier[path_spec] = keyword[None] , identifier[mode] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[path_spec] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[data_stream] = identifier[getattr] ( identifier[path_spec] , literal[string] , keyword[None] )
identifier[file_system] = identifier[resolver] . identifier[Resolver] . identifier[OpenFileSystem] (
identifier[path_spec] , identifier[resolver_context] = identifier[self] . identifier[_resolver_context] )
identifier[file_entry] = identifier[file_system] . identifier[GetFileEntryByPathSpec] ( identifier[path_spec] )
keyword[if] keyword[not] identifier[file_entry] :
identifier[file_system] . identifier[Close] ()
keyword[raise] identifier[IOError] ( literal[string] )
identifier[tsk_file] = identifier[file_entry] . identifier[GetTSKFile] ()
identifier[tsk_attribute] = keyword[None]
keyword[if] identifier[getattr] ( identifier[tsk_file] , literal[string] , keyword[None] ) keyword[is] keyword[None] :
identifier[file_system] . identifier[Close] ()
keyword[raise] identifier[IOError] ( literal[string] )
keyword[if] identifier[getattr] ( identifier[tsk_file] . identifier[info] , literal[string] , keyword[None] ) keyword[is] keyword[None] :
identifier[file_system] . identifier[Close] ()
keyword[raise] identifier[IOError] (
literal[string] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[tsk_file] . identifier[info] . identifier[meta] , literal[string] ):
identifier[file_system] . identifier[Close] ()
keyword[raise] identifier[IOError] (
literal[string] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[tsk_file] . identifier[info] . identifier[meta] , literal[string] ):
identifier[file_system] . identifier[Close] ()
keyword[raise] identifier[IOError] (
literal[string] )
keyword[if] identifier[data_stream] :
keyword[for] identifier[attribute] keyword[in] identifier[tsk_file] :
keyword[if] identifier[getattr] ( identifier[attribute] , literal[string] , keyword[None] ) keyword[is] keyword[None] :
keyword[continue]
identifier[attribute_name] = identifier[getattr] ( identifier[attribute] . identifier[info] , literal[string] , keyword[None] )
keyword[if] identifier[attribute_name] keyword[is] keyword[None] :
identifier[attribute_name] = literal[string]
keyword[else] :
keyword[try] :
identifier[attribute_name] = identifier[attribute_name] . identifier[decode] ( literal[string] )
keyword[except] identifier[UnicodeError] :
keyword[continue]
identifier[attribute_type] = identifier[getattr] ( identifier[attribute] . identifier[info] , literal[string] , keyword[None] )
keyword[if] identifier[attribute_name] == identifier[data_stream] keyword[and] identifier[attribute_type] keyword[in] (
identifier[pytsk3] . identifier[TSK_FS_ATTR_TYPE_HFS_DEFAULT] ,
identifier[pytsk3] . identifier[TSK_FS_ATTR_TYPE_HFS_DATA] ,
identifier[pytsk3] . identifier[TSK_FS_ATTR_TYPE_NTFS_DATA] ):
identifier[tsk_attribute] = identifier[attribute]
keyword[break]
keyword[if] identifier[tsk_attribute] keyword[is] keyword[None] :
identifier[file_system] . identifier[Close] ()
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[data_stream] ))
keyword[if] ( keyword[not] identifier[tsk_attribute] keyword[and]
identifier[tsk_file] . identifier[info] . identifier[meta] . identifier[type] != identifier[pytsk3] . identifier[TSK_FS_META_TYPE_REG] ):
identifier[file_system] . identifier[Close] ()
keyword[raise] identifier[IOError] ( literal[string] )
identifier[self] . identifier[_current_offset] = literal[int]
identifier[self] . identifier[_file_system] = identifier[file_system]
identifier[self] . identifier[_tsk_attribute] = identifier[tsk_attribute]
identifier[self] . identifier[_tsk_file] = identifier[tsk_file]
keyword[if] identifier[self] . identifier[_tsk_attribute] :
identifier[self] . identifier[_size] = identifier[self] . identifier[_tsk_attribute] . identifier[info] . identifier[size]
keyword[else] :
identifier[self] . identifier[_size] = identifier[self] . identifier[_tsk_file] . identifier[info] . identifier[meta] . identifier[size] | def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec:
raise ValueError('Missing path specification.') # depends on [control=['if'], data=[]]
data_stream = getattr(path_spec, 'data_stream', None)
file_system = resolver.Resolver.OpenFileSystem(path_spec, resolver_context=self._resolver_context)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
if not file_entry:
file_system.Close()
raise IOError('Unable to retrieve file entry.') # depends on [control=['if'], data=[]]
tsk_file = file_entry.GetTSKFile()
tsk_attribute = None
# Note that because pytsk3.File does not explicitly defines info
# we need to check if the attribute exists and has a value other
# than None.
if getattr(tsk_file, 'info', None) is None:
file_system.Close()
raise IOError('Missing attribute info in file (pytsk3.File).') # depends on [control=['if'], data=[]]
# Note that because pytsk3.TSK_FS_FILE does not explicitly defines meta
# we need to check if the attribute exists and has a value other
# than None.
if getattr(tsk_file.info, 'meta', None) is None:
file_system.Close()
raise IOError('Missing attribute meta in file.info pytsk3.TSK_FS_FILE).') # depends on [control=['if'], data=[]]
# Note that because pytsk3.TSK_FS_META does not explicitly defines size
# we need to check if the attribute exists.
if not hasattr(tsk_file.info.meta, 'size'):
file_system.Close()
raise IOError('Missing attribute size in file.info.meta (pytsk3.TSK_FS_META).') # depends on [control=['if'], data=[]]
# Note that because pytsk3.TSK_FS_META does not explicitly defines type
# we need to check if the attribute exists.
if not hasattr(tsk_file.info.meta, 'type'):
file_system.Close()
raise IOError('Missing attribute type in file.info.meta (pytsk3.TSK_FS_META).') # depends on [control=['if'], data=[]]
if data_stream:
for attribute in tsk_file:
if getattr(attribute, 'info', None) is None:
continue # depends on [control=['if'], data=[]]
# The value of the attribute name will be None for the default
# data stream.
attribute_name = getattr(attribute.info, 'name', None)
if attribute_name is None:
attribute_name = '' # depends on [control=['if'], data=['attribute_name']]
else:
try:
# pytsk3 returns an UTF-8 encoded byte string.
attribute_name = attribute_name.decode('utf8') # depends on [control=['try'], data=[]]
except UnicodeError:
# Continue here since we cannot represent the attribute name.
continue # depends on [control=['except'], data=[]]
attribute_type = getattr(attribute.info, 'type', None)
if attribute_name == data_stream and attribute_type in (pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT, pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA, pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA):
tsk_attribute = attribute
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attribute']]
if tsk_attribute is None:
file_system.Close()
raise IOError('Unable to open data stream: {0:s}.'.format(data_stream)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not tsk_attribute and tsk_file.info.meta.type != pytsk3.TSK_FS_META_TYPE_REG:
file_system.Close()
raise IOError('Not a regular file.') # depends on [control=['if'], data=[]]
self._current_offset = 0
self._file_system = file_system
self._tsk_attribute = tsk_attribute
self._tsk_file = tsk_file
if self._tsk_attribute:
self._size = self._tsk_attribute.info.size # depends on [control=['if'], data=[]]
else:
self._size = self._tsk_file.info.meta.size |
def to_dict(self):
    """
    dump a representation of the nparray object to a dictionary. The
    nparray object should then be able to be fully restored via
    nparray.from_dict
    """
    def _sanitize(value):
        # Convert numpy arrays and unit objects into JSON-serializable
        # forms; anything else is passed through untouched.
        if isinstance(value, np.ndarray):
            return value.tolist()
        if is_unit(value)[0]:
            return value.to_string()
        return value

    out = {}
    for key, value in self._descriptors.items():
        out[key] = _sanitize(value)
    # Record the concrete nparray subclass so from_dict can restore it.
    out['nparray'] = self.__class__.__name__.lower()
    return out
constant[
dump a representation of the nparray object to a dictionary. The
nparray object should then be able to be fully restored via
nparray.from_dict
]
def function[_json_safe, parameter[v]]:
if call[name[isinstance], parameter[name[v], name[np].ndarray]] begin[:]
return[call[name[v].tolist, parameter[]]]
variable[d] assign[=] <ast.DictComp object at 0x7da20e9b23e0>
call[name[d]][constant[nparray]] assign[=] call[name[self].__class__.__name__.lower, parameter[]]
return[name[d]] | keyword[def] identifier[to_dict] ( identifier[self] ):
literal[string]
keyword[def] identifier[_json_safe] ( identifier[v] ):
keyword[if] identifier[isinstance] ( identifier[v] , identifier[np] . identifier[ndarray] ):
keyword[return] identifier[v] . identifier[tolist] ()
keyword[elif] identifier[is_unit] ( identifier[v] )[ literal[int] ]:
keyword[return] identifier[v] . identifier[to_string] ()
keyword[else] :
keyword[return] identifier[v]
identifier[d] ={ identifier[k] : identifier[_json_safe] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[_descriptors] . identifier[items] ()}
identifier[d] [ literal[string] ]= identifier[self] . identifier[__class__] . identifier[__name__] . identifier[lower] ()
keyword[return] identifier[d] | def to_dict(self):
"""
dump a representation of the nparray object to a dictionary. The
nparray object should then be able to be fully restored via
nparray.from_dict
"""
def _json_safe(v):
if isinstance(v, np.ndarray):
return v.tolist() # depends on [control=['if'], data=[]]
elif is_unit(v)[0]:
return v.to_string() # depends on [control=['if'], data=[]]
else:
return v
d = {k: _json_safe(v) for (k, v) in self._descriptors.items()}
d['nparray'] = self.__class__.__name__.lower()
return d |
def get_known_subqueues(self):
    """ Returns all known subqueues """
    # Subqueues are queues whose id starts with "<this id>/".
    prefix = self.id if self.id.endswith("/") else self.id + "/"
    known = Queue.all_known(sources=("jobs",))
    return {queue_id for queue_id in known if queue_id.startswith(prefix)}
constant[ Returns all known subqueues ]
variable[all_queues_from_mongodb] assign[=] call[name[Queue].all_known, parameter[]]
variable[idprefix] assign[=] name[self].id
if <ast.UnaryOp object at 0x7da1b0847370> begin[:]
<ast.AugAssign object at 0x7da1b08474f0>
return[<ast.SetComp object at 0x7da1b0845a50>] | keyword[def] identifier[get_known_subqueues] ( identifier[self] ):
literal[string]
identifier[all_queues_from_mongodb] = identifier[Queue] . identifier[all_known] ( identifier[sources] =( literal[string] ,))
identifier[idprefix] = identifier[self] . identifier[id]
keyword[if] keyword[not] identifier[idprefix] . identifier[endswith] ( literal[string] ):
identifier[idprefix] += literal[string]
keyword[return] { identifier[q] keyword[for] identifier[q] keyword[in] identifier[all_queues_from_mongodb] keyword[if] identifier[q] . identifier[startswith] ( identifier[idprefix] )} | def get_known_subqueues(self):
""" Returns all known subqueues """
all_queues_from_mongodb = Queue.all_known(sources=('jobs',))
idprefix = self.id
if not idprefix.endswith('/'):
idprefix += '/' # depends on [control=['if'], data=[]]
return {q for q in all_queues_from_mongodb if q.startswith(idprefix)} |
def size_threshold(img, thr, comp='lt', structure=None):
    r"""
    Removes binary objects from an image identified by a size threshold.
    The unconnected binary objects in an image are identified and all removed
    whose size compares (e.g. less-than) to a supplied threshold value.
    The threshold ``thr`` can be any positive integer value. The comparison operator
    can be one of lt, le, gt, ge, ne or eq. The operators used are the functions of
    the same name supplied by the `operator` module of python.
    Parameters
    ----------
    img : array_like
        An array containing connected objects. Will be cast to type bool.
    thr : int
        Integer defining the threshold size of the binary objects to remove.
    comp : {'lt', 'le', 'gt', 'ge', 'ne', 'eq'}
        The type of comparison to perform. Use e.g. 'lt' for less-than.
    structure : array of ints, optional
        A structuring element that defines feature connections.
        ``structure`` must be symmetric. If no structuring element is provided,
        one is automatically generated with a squared connectivity equal to
        one. That is, for a 2-D ``input`` array, the default structuring element
        is::
            [[0,1,0],
             [1,1,1],
             [0,1,0]]
    Returns
    -------
    binary_image : ndarray
        The supplied binary image with all objects removed that positively compare
        to the threshold ``thr`` using the comparison operator defined with ``comp``.
    Notes
    -----
    If your voxel size is no isotrop i.e. of side-length 1 for all dimensions, simply
    divide the supplied threshold through the real voxel size.
    """
    operators = {'lt': lt, 'le': le, 'gt': gt, 'ge': ge, 'eq': eq, 'ne': ne}
    # Validate the comparison operator before any array work so bad input
    # fails fast.
    if comp not in operators:
        raise ValueError("comp must be one of {}".format(list(operators.keys())))
    comp = operators[comp]

    # NOTE: ``numpy.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``bool`` is the supported boolean dtype.
    img = numpy.asarray(img).astype(bool)

    labeled_array, num_features = label(img, structure)
    # Count the voxels of every labelled object in one pass (index 0 is the
    # background) instead of materialising a full boolean mask per object
    # just to count it — O(pixels) instead of O(objects * pixels).
    sizes = numpy.bincount(labeled_array.ravel(), minlength=num_features + 1)
    for oidx in range(1, num_features + 1):
        if comp(sizes[oidx], thr):
            # Clear all voxels belonging to this object.
            img[labeled_array == oidx] = False
    return img
constant[
Removes binary objects from an image identified by a size threshold.
The unconnected binary objects in an image are identified and all removed
whose size compares (e.g. less-than) to a supplied threshold value.
The threshold ``thr`` can be any positive integer value. The comparison operator
can be one of lt, le, gt, ge, ne or eq. The operators used are the functions of
the same name supplied by the `operator` module of python.
Parameters
----------
img : array_like
An array containing connected objects. Will be cast to type numpy.bool.
thr : int
Integer defining the threshold size of the binary objects to remove.
comp : {'lt', 'le', 'gt', 'ge', 'ne', 'eq'}
The type of comparison to perform. Use e.g. 'lt' for less-than.
structure : array of ints, optional
A structuring element that defines feature connections.
``structure`` must be symmetric. If no structuring element is provided,
one is automatically generated with a squared connectivity equal to
one. That is, for a 2-D ``input`` array, the default structuring element
is::
[[0,1,0],
[1,1,1],
[0,1,0]]
Returns
-------
binary_image : ndarray
The supplied binary image with all objects removed that positively compare
to the threshold ``thr`` using the comparison operator defined with ``comp``.
Notes
-----
If your voxel size is no isotrop i.e. of side-length 1 for all dimensions, simply
divide the supplied threshold through the real voxel size.
]
variable[operators] assign[=] dictionary[[<ast.Constant object at 0x7da204620fd0>, <ast.Constant object at 0x7da204621000>, <ast.Constant object at 0x7da2046215a0>, <ast.Constant object at 0x7da204621b70>, <ast.Constant object at 0x7da204623940>, <ast.Constant object at 0x7da1b12d95a0>], [<ast.Name object at 0x7da1b12d8340>, <ast.Name object at 0x7da1b12db3a0>, <ast.Name object at 0x7da1b12d9cc0>, <ast.Name object at 0x7da1b12db190>, <ast.Name object at 0x7da1b12d80a0>, <ast.Name object at 0x7da1b12da4d0>]]
variable[img] assign[=] call[call[name[numpy].asarray, parameter[name[img]]].astype, parameter[name[numpy].bool]]
if compare[name[comp] <ast.NotIn object at 0x7da2590d7190> name[operators]] begin[:]
<ast.Raise object at 0x7da1b12d9b40>
variable[comp] assign[=] call[name[operators]][name[comp]]
<ast.Tuple object at 0x7da1b113e7a0> assign[=] call[name[label], parameter[name[img], name[structure]]]
for taget[name[oidx]] in starred[call[name[range], parameter[constant[1], binary_operation[name[num_features] + constant[1]]]]] begin[:]
variable[omask] assign[=] compare[name[labeled_array] equal[==] name[oidx]]
if call[name[comp], parameter[call[name[numpy].count_nonzero, parameter[name[omask]]], name[thr]]] begin[:]
call[name[img]][name[omask]] assign[=] constant[False]
return[name[img]] | keyword[def] identifier[size_threshold] ( identifier[img] , identifier[thr] , identifier[comp] = literal[string] , identifier[structure] = keyword[None] ):
literal[string]
identifier[operators] ={ literal[string] : identifier[lt] , literal[string] : identifier[le] , literal[string] : identifier[gt] , literal[string] : identifier[ge] , literal[string] : identifier[eq] , literal[string] : identifier[ne] }
identifier[img] = identifier[numpy] . identifier[asarray] ( identifier[img] ). identifier[astype] ( identifier[numpy] . identifier[bool] )
keyword[if] identifier[comp] keyword[not] keyword[in] identifier[operators] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[list] ( identifier[operators] . identifier[keys] ())))
identifier[comp] = identifier[operators] [ identifier[comp] ]
identifier[labeled_array] , identifier[num_features] = identifier[label] ( identifier[img] , identifier[structure] )
keyword[for] identifier[oidx] keyword[in] identifier[range] ( literal[int] , identifier[num_features] + literal[int] ):
identifier[omask] = identifier[labeled_array] == identifier[oidx]
keyword[if] identifier[comp] ( identifier[numpy] . identifier[count_nonzero] ( identifier[omask] ), identifier[thr] ):
identifier[img] [ identifier[omask] ]= keyword[False]
keyword[return] identifier[img] | def size_threshold(img, thr, comp='lt', structure=None):
"""
Removes binary objects from an image identified by a size threshold.
The unconnected binary objects in an image are identified and all removed
whose size compares (e.g. less-than) to a supplied threshold value.
The threshold ``thr`` can be any positive integer value. The comparison operator
can be one of lt, le, gt, ge, ne or eq. The operators used are the functions of
the same name supplied by the `operator` module of python.
Parameters
----------
img : array_like
An array containing connected objects. Will be cast to type numpy.bool.
thr : int
Integer defining the threshold size of the binary objects to remove.
comp : {'lt', 'le', 'gt', 'ge', 'ne', 'eq'}
The type of comparison to perform. Use e.g. 'lt' for less-than.
structure : array of ints, optional
A structuring element that defines feature connections.
``structure`` must be symmetric. If no structuring element is provided,
one is automatically generated with a squared connectivity equal to
one. That is, for a 2-D ``input`` array, the default structuring element
is::
[[0,1,0],
[1,1,1],
[0,1,0]]
Returns
-------
binary_image : ndarray
The supplied binary image with all objects removed that positively compare
to the threshold ``thr`` using the comparison operator defined with ``comp``.
Notes
-----
If your voxel size is no isotrop i.e. of side-length 1 for all dimensions, simply
divide the supplied threshold through the real voxel size.
"""
operators = {'lt': lt, 'le': le, 'gt': gt, 'ge': ge, 'eq': eq, 'ne': ne}
img = numpy.asarray(img).astype(numpy.bool)
if comp not in operators:
raise ValueError('comp must be one of {}'.format(list(operators.keys()))) # depends on [control=['if'], data=['operators']]
comp = operators[comp]
(labeled_array, num_features) = label(img, structure)
for oidx in range(1, num_features + 1):
omask = labeled_array == oidx
if comp(numpy.count_nonzero(omask), thr):
img[omask] = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['oidx']]
return img |
def handle(cls, vm, args):
    """
    Setup forwarding connection to given VM and pipe docker cmds over SSH.

    Looks up the VM named ``vm``, starts it if it is not running, bridges
    its remote docker unix socket to a local TCP port, points
    ``DOCKER_HOST`` at that port, and finally runs the local ``docker``
    client with the remaining ``args``.

    :param cls: class providing ``echo`` for user-facing output
    :param vm: name of the docker host VM to connect to
    :param args: extra command-line arguments passed through to ``docker``
    """
    docker = Iaas.info(vm)
    if not docker:
        raise Exception('docker vm %s not found' % vm)
    if docker['state'] != 'running':
        # Boot the VM on demand so the docker daemon becomes reachable.
        Iaas.start(vm)
    # NOTE(review): assumes the first IP of the first interface is the
    # reachable address -- TODO confirm for multi-homed VMs.
    remote_addr = docker['ifaces'][0]['ips'][0]['ip']
    # Forward the remote /var/run/docker.sock to a local TCP port over SSH.
    port = unixpipe.setup(remote_addr, 'root', '/var/run/docker.sock')
    os.environ['DOCKER_HOST'] = 'tcp://localhost:%d' % port
    cls.echo('using DOCKER_HOST=%s' % os.environ['DOCKER_HOST'])
    subprocess.call(['docker'] + list(args)) | def function[handle, parameter[cls, vm, args]]:
constant[
Setup forwarding connection to given VM and pipe docker cmds over SSH.
]
variable[docker] assign[=] call[name[Iaas].info, parameter[name[vm]]]
if <ast.UnaryOp object at 0x7da18eb57be0> begin[:]
<ast.Raise object at 0x7da18eb57d00>
if compare[call[name[docker]][constant[state]] not_equal[!=] constant[running]] begin[:]
call[name[Iaas].start, parameter[name[vm]]]
variable[remote_addr] assign[=] call[call[call[call[call[name[docker]][constant[ifaces]]][constant[0]]][constant[ips]]][constant[0]]][constant[ip]]
variable[port] assign[=] call[name[unixpipe].setup, parameter[name[remote_addr], constant[root], constant[/var/run/docker.sock]]]
call[name[os].environ][constant[DOCKER_HOST]] assign[=] binary_operation[constant[tcp://localhost:%d] <ast.Mod object at 0x7da2590d6920> name[port]]
call[name[cls].echo, parameter[binary_operation[constant[using DOCKER_HOST=%s] <ast.Mod object at 0x7da2590d6920> call[name[os].environ][constant[DOCKER_HOST]]]]]
call[name[subprocess].call, parameter[binary_operation[list[[<ast.Constant object at 0x7da18eb56800>]] + call[name[list], parameter[name[args]]]]]] | keyword[def] identifier[handle] ( identifier[cls] , identifier[vm] , identifier[args] ):
literal[string]
identifier[docker] = identifier[Iaas] . identifier[info] ( identifier[vm] )
keyword[if] keyword[not] identifier[docker] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[vm] )
keyword[if] identifier[docker] [ literal[string] ]!= literal[string] :
identifier[Iaas] . identifier[start] ( identifier[vm] )
identifier[remote_addr] = identifier[docker] [ literal[string] ][ literal[int] ][ literal[string] ][ literal[int] ][ literal[string] ]
identifier[port] = identifier[unixpipe] . identifier[setup] ( identifier[remote_addr] , literal[string] , literal[string] )
identifier[os] . identifier[environ] [ literal[string] ]= literal[string] % identifier[port]
identifier[cls] . identifier[echo] ( literal[string] % identifier[os] . identifier[environ] [ literal[string] ])
identifier[subprocess] . identifier[call] ([ literal[string] ]+ identifier[list] ( identifier[args] )) | def handle(cls, vm, args):
"""
Setup forwarding connection to given VM and pipe docker cmds over SSH.
"""
docker = Iaas.info(vm)
if not docker:
raise Exception('docker vm %s not found' % vm) # depends on [control=['if'], data=[]]
if docker['state'] != 'running':
Iaas.start(vm) # depends on [control=['if'], data=[]]
# XXX
remote_addr = docker['ifaces'][0]['ips'][0]['ip']
port = unixpipe.setup(remote_addr, 'root', '/var/run/docker.sock')
os.environ['DOCKER_HOST'] = 'tcp://localhost:%d' % port
cls.echo('using DOCKER_HOST=%s' % os.environ['DOCKER_HOST'])
subprocess.call(['docker'] + list(args)) |
def obtain_all_bond_lengths(sp1, sp2, default_bl=None):
    """
    Obtain bond lengths for all bond orders from bond length database

    Args:
        sp1 (Specie): First specie.
        sp2 (Specie): Second specie.
        default_bl: If a particular type of bond does not exist, use this
            bond length as a default value (bond order = 1).
            If None, a ValueError will be thrown.

    Return:
        A dict mapping bond order to bond length in angstrom
    """
    # Accept Element objects as well as bare symbol strings.
    if isinstance(sp1, Element):
        sp1 = sp1.symbol
    if isinstance(sp2, Element):
        sp2 = sp2.symbol
    # The database is keyed on the alphabetically sorted symbol pair.
    syms = tuple(sorted([sp1, sp2]))
    if syms in bond_lengths:
        # Return a copy so callers cannot mutate the shared database entry.
        return bond_lengths[syms].copy()
    elif default_bl is not None:
        return {1: default_bl}
    else:
        raise ValueError("No bond data for elements {} - {}".format(*syms)) | def function[obtain_all_bond_lengths, parameter[sp1, sp2, default_bl]]:
constant[
Obtain bond lengths for all bond orders from bond length database
Args:
sp1 (Specie): First specie.
sp2 (Specie): Second specie.
default_bl: If a particular type of bond does not exist, use this
bond length as a default value (bond order = 1).
If None, a ValueError will be thrown.
Return:
A dict mapping bond order to bond length in angstrom
]
if call[name[isinstance], parameter[name[sp1], name[Element]]] begin[:]
variable[sp1] assign[=] name[sp1].symbol
if call[name[isinstance], parameter[name[sp2], name[Element]]] begin[:]
variable[sp2] assign[=] name[sp2].symbol
variable[syms] assign[=] call[name[tuple], parameter[call[name[sorted], parameter[list[[<ast.Name object at 0x7da18c4cc520>, <ast.Name object at 0x7da18c4cd210>]]]]]]
if compare[name[syms] in name[bond_lengths]] begin[:]
return[call[call[name[bond_lengths]][name[syms]].copy, parameter[]]] | keyword[def] identifier[obtain_all_bond_lengths] ( identifier[sp1] , identifier[sp2] , identifier[default_bl] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[sp1] , identifier[Element] ):
identifier[sp1] = identifier[sp1] . identifier[symbol]
keyword[if] identifier[isinstance] ( identifier[sp2] , identifier[Element] ):
identifier[sp2] = identifier[sp2] . identifier[symbol]
identifier[syms] = identifier[tuple] ( identifier[sorted] ([ identifier[sp1] , identifier[sp2] ]))
keyword[if] identifier[syms] keyword[in] identifier[bond_lengths] :
keyword[return] identifier[bond_lengths] [ identifier[syms] ]. identifier[copy] ()
keyword[elif] identifier[default_bl] keyword[is] keyword[not] keyword[None] :
keyword[return] { literal[int] : identifier[default_bl] }
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (* identifier[syms] )) | def obtain_all_bond_lengths(sp1, sp2, default_bl=None):
"""
Obtain bond lengths for all bond orders from bond length database
Args:
sp1 (Specie): First specie.
sp2 (Specie): Second specie.
default_bl: If a particular type of bond does not exist, use this
bond length as a default value (bond order = 1).
If None, a ValueError will be thrown.
Return:
A dict mapping bond order to bond length in angstrom
"""
if isinstance(sp1, Element):
sp1 = sp1.symbol # depends on [control=['if'], data=[]]
if isinstance(sp2, Element):
sp2 = sp2.symbol # depends on [control=['if'], data=[]]
syms = tuple(sorted([sp1, sp2]))
if syms in bond_lengths:
return bond_lengths[syms].copy() # depends on [control=['if'], data=['syms', 'bond_lengths']]
elif default_bl is not None:
return {1: default_bl} # depends on [control=['if'], data=['default_bl']]
else:
raise ValueError('No bond data for elements {} - {}'.format(*syms)) |
def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None):
    '''
    Attaches given usage plan to each of the apis provided in a list of apiId and stage values

    .. versionadded:: 2017.7.0

    plan_id
        the id of the usage plan to attach the APIs to

    apis
        a list of dictionaries, where each dictionary contains the following:

        apiId
            a string, which is the id of the created API in AWS ApiGateway

        stage
            a string, which is the stage that the created API is deployed to.

    region, key, keyid, profile
        standard boto connection parameters

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
    '''
    # Delegates to the shared helper; 'add' selects attach (vs. remove) semantics.
    return _update_usage_plan_apis(plan_id, apis, 'add', region=region, key=key, keyid=keyid, profile=profile) | def function[attach_usage_plan_to_apis, parameter[plan_id, apis, region, key, keyid, profile]]:
constant[
Attaches given usage plan to each of the apis provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
apis
a list of dictionaries, where each dictionary contains the following:
apiId
a string, which is the id of the created API in AWS ApiGateway
stage
a string, which is the stage that the created API is deployed to.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
]
return[call[name[_update_usage_plan_apis], parameter[name[plan_id], name[apis], constant[add]]]] | keyword[def] identifier[attach_usage_plan_to_apis] ( identifier[plan_id] , identifier[apis] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
keyword[return] identifier[_update_usage_plan_apis] ( identifier[plan_id] , identifier[apis] , literal[string] , identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) | def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None):
"""
Attaches given usage plan to each of the apis provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
apis
a list of dictionaries, where each dictionary contains the following:
apiId
a string, which is the id of the created API in AWS ApiGateway
stage
a string, which is the stage that the created API is deployed to.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
"""
return _update_usage_plan_apis(plan_id, apis, 'add', region=region, key=key, keyid=keyid, profile=profile) |
def _get_openstack_release(self):
    """Get openstack release.

    Return an integer representing the enum value of the openstack
    release.
    """
    # Must be ordered by OpenStack release (not by Ubuntu release):
    # expose each known series/release pair as an integer attribute so
    # releases can be compared with ordinary <, >=, etc.
    for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
        setattr(self, os_pair, i)
    # Map (ubuntu series, openstack origin) -> the enum value assigned above.
    # A None origin means the release that ships natively with that series.
    releases = {
        ('trusty', None): self.trusty_icehouse,
        ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
        ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
        ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
        ('xenial', None): self.xenial_mitaka,
        ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
        ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
        ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
        ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
        ('yakkety', None): self.yakkety_newton,
        ('zesty', None): self.zesty_ocata,
        ('artful', None): self.artful_pike,
        ('bionic', None): self.bionic_queens,
        ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
        ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
        ('cosmic', None): self.cosmic_rocky,
        ('disco', None): self.disco_stein,
    }
    # Raises KeyError for an unknown series/origin combination.
    return releases[(self.series, self.openstack)] | def function[_get_openstack_release, parameter[self]]:
constant[Get openstack release.
Return an integer representing the enum value of the openstack
release.
]
for taget[tuple[[<ast.Name object at 0x7da18f00e5c0>, <ast.Name object at 0x7da18f00d300>]]] in starred[call[name[enumerate], parameter[name[OPENSTACK_RELEASES_PAIRS]]]] begin[:]
call[name[setattr], parameter[name[self], name[os_pair], name[i]]]
variable[releases] assign[=] dictionary[[<ast.Tuple object at 0x7da18f00fdc0>, <ast.Tuple object at 0x7da18f00ebc0>, <ast.Tuple object at 0x7da18f00c2b0>, <ast.Tuple object at 0x7da18f00fc10>, <ast.Tuple object at 0x7da18f00dbd0>, <ast.Tuple object at 0x7da18f00c130>, <ast.Tuple object at 0x7da18f00d270>, <ast.Tuple object at 0x7da18f00cc40>, <ast.Tuple object at 0x7da18f00f3a0>, <ast.Tuple object at 0x7da18f00c610>, <ast.Tuple object at 0x7da1b12498a0>, <ast.Tuple object at 0x7da1b124a080>, <ast.Tuple object at 0x7da1b1249090>, <ast.Tuple object at 0x7da1b1249390>, <ast.Tuple object at 0x7da1b124a590>, <ast.Tuple object at 0x7da1b1248df0>, <ast.Tuple object at 0x7da1b1248850>], [<ast.Attribute object at 0x7da1b124ad70>, <ast.Attribute object at 0x7da1b124af80>, <ast.Attribute object at 0x7da1b124a020>, <ast.Attribute object at 0x7da1b1249150>, <ast.Attribute object at 0x7da1b12491e0>, <ast.Attribute object at 0x7da1b1249540>, <ast.Attribute object at 0x7da1b124a5c0>, <ast.Attribute object at 0x7da1b1248430>, <ast.Attribute object at 0x7da1b1249420>, <ast.Attribute object at 0x7da1b1248f70>, <ast.Attribute object at 0x7da1b1248f40>, <ast.Attribute object at 0x7da1b1249810>, <ast.Attribute object at 0x7da1b12493c0>, <ast.Attribute object at 0x7da1b1249780>, <ast.Attribute object at 0x7da1b124a260>, <ast.Attribute object at 0x7da1b1249720>, <ast.Attribute object at 0x7da1b12482b0>]]
return[call[name[releases]][tuple[[<ast.Attribute object at 0x7da1b1249120>, <ast.Attribute object at 0x7da1b12496f0>]]]] | keyword[def] identifier[_get_openstack_release] ( identifier[self] ):
literal[string]
keyword[for] identifier[i] , identifier[os_pair] keyword[in] identifier[enumerate] ( identifier[OPENSTACK_RELEASES_PAIRS] ):
identifier[setattr] ( identifier[self] , identifier[os_pair] , identifier[i] )
identifier[releases] ={
( literal[string] , keyword[None] ): identifier[self] . identifier[trusty_icehouse] ,
( literal[string] , literal[string] ): identifier[self] . identifier[trusty_kilo] ,
( literal[string] , literal[string] ): identifier[self] . identifier[trusty_liberty] ,
( literal[string] , literal[string] ): identifier[self] . identifier[trusty_mitaka] ,
( literal[string] , keyword[None] ): identifier[self] . identifier[xenial_mitaka] ,
( literal[string] , literal[string] ): identifier[self] . identifier[xenial_newton] ,
( literal[string] , literal[string] ): identifier[self] . identifier[xenial_ocata] ,
( literal[string] , literal[string] ): identifier[self] . identifier[xenial_pike] ,
( literal[string] , literal[string] ): identifier[self] . identifier[xenial_queens] ,
( literal[string] , keyword[None] ): identifier[self] . identifier[yakkety_newton] ,
( literal[string] , keyword[None] ): identifier[self] . identifier[zesty_ocata] ,
( literal[string] , keyword[None] ): identifier[self] . identifier[artful_pike] ,
( literal[string] , keyword[None] ): identifier[self] . identifier[bionic_queens] ,
( literal[string] , literal[string] ): identifier[self] . identifier[bionic_rocky] ,
( literal[string] , literal[string] ): identifier[self] . identifier[bionic_stein] ,
( literal[string] , keyword[None] ): identifier[self] . identifier[cosmic_rocky] ,
( literal[string] , keyword[None] ): identifier[self] . identifier[disco_stein] ,
}
keyword[return] identifier[releases] [( identifier[self] . identifier[series] , identifier[self] . identifier[openstack] )] | def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
for (i, os_pair) in enumerate(OPENSTACK_RELEASES_PAIRS):
setattr(self, os_pair, i) # depends on [control=['for'], data=[]]
releases = {('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('xenial', 'cloud:xenial-pike'): self.xenial_pike, ('xenial', 'cloud:xenial-queens'): self.xenial_queens, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, ('bionic', 'cloud:bionic-stein'): self.bionic_stein, ('cosmic', None): self.cosmic_rocky, ('disco', None): self.disco_stein}
return releases[self.series, self.openstack] |
def delete_servers(self, server_id):
    """
    Requires: account ID, server ID
    Input should be server id
    Returns: list of failed deletions (if any)
    Endpoint: api.newrelic.com
    Errors: 403 Invalid API Key
    Method: Delete
    """
    endpoint = "https://api.newrelic.com"
    uri = "{endpoint}/api/v1/accounts/{account_id}/servers/{server_id}.xml".format(
        endpoint=endpoint,
        account_id=self.account_id,
        server_id=server_id)
    # response is parsed XML; each <server> carries a <result> element.
    response = self._make_delete_request(uri)
    # Collect ids whose <result> text does not report 'deleted'.
    failed_deletions = []
    for server in response.findall('.//server'):
        if not 'deleted' in server.findall('.//result')[0].text:
            failed_deletions.append({'server_id': server.get('id')})
    return failed_deletions | def function[delete_servers, parameter[self, server_id]]:
constant[
Requires: account ID, server ID
Input should be server id
Returns: list of failed deletions (if any)
Endpoint: api.newrelic.com
Errors: 403 Invalid API Key
Method: Delete
]
variable[endpoint] assign[=] constant[https://api.newrelic.com]
variable[uri] assign[=] call[constant[{endpoint}/api/v1/accounts/{account_id}/servers/{server_id}.xml].format, parameter[]]
variable[response] assign[=] call[name[self]._make_delete_request, parameter[name[uri]]]
variable[failed_deletions] assign[=] list[[]]
for taget[name[server]] in starred[call[name[response].findall, parameter[constant[.//server]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0baab90> begin[:]
call[name[failed_deletions].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0ba80d0>], [<ast.Call object at 0x7da1b0ba8340>]]]]
return[name[failed_deletions]] | keyword[def] identifier[delete_servers] ( identifier[self] , identifier[server_id] ):
literal[string]
identifier[endpoint] = literal[string]
identifier[uri] = literal[string] . identifier[format] (
identifier[endpoint] = identifier[endpoint] ,
identifier[account_id] = identifier[self] . identifier[account_id] ,
identifier[server_id] = identifier[server_id] )
identifier[response] = identifier[self] . identifier[_make_delete_request] ( identifier[uri] )
identifier[failed_deletions] =[]
keyword[for] identifier[server] keyword[in] identifier[response] . identifier[findall] ( literal[string] ):
keyword[if] keyword[not] literal[string] keyword[in] identifier[server] . identifier[findall] ( literal[string] )[ literal[int] ]. identifier[text] :
identifier[failed_deletions] . identifier[append] ({ literal[string] : identifier[server] . identifier[get] ( literal[string] )})
keyword[return] identifier[failed_deletions] | def delete_servers(self, server_id):
"""
Requires: account ID, server ID
Input should be server id
Returns: list of failed deletions (if any)
Endpoint: api.newrelic.com
Errors: 403 Invalid API Key
Method: Delete
"""
endpoint = 'https://api.newrelic.com'
uri = '{endpoint}/api/v1/accounts/{account_id}/servers/{server_id}.xml'.format(endpoint=endpoint, account_id=self.account_id, server_id=server_id)
response = self._make_delete_request(uri)
failed_deletions = []
for server in response.findall('.//server'):
if not 'deleted' in server.findall('.//result')[0].text:
failed_deletions.append({'server_id': server.get('id')}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['server']]
return failed_deletions |
def update_index(self, key, value):
    "Update the index with the new key/values."
    # key: the record's key; value: mapping of field name -> field value.
    for k, v in value.items():
        # Only fields declared in self.indexes are tracked.
        if k in self.indexes:
            # A non-string index value switches it into a lazy one.
            if not isinstance(v, six.string_types):
                self.index_defs[k]['type'] = 'lazy'
            if v not in self.indexes[k]:
                self.indexes[k][v] = set([])
            # indexes[field][value] is the set of record keys with that value.
            self.indexes[k][v].add(key) | def function[update_index, parameter[self, key, value]]:
constant[Update the index with the new key/values.]
for taget[tuple[[<ast.Name object at 0x7da2047e98a0>, <ast.Name object at 0x7da2047e8730>]]] in starred[call[name[value].items, parameter[]]] begin[:]
if compare[name[k] in name[self].indexes] begin[:]
if <ast.UnaryOp object at 0x7da2047e9060> begin[:]
call[call[name[self].index_defs][name[k]]][constant[type]] assign[=] constant[lazy]
if compare[name[v] <ast.NotIn object at 0x7da2590d7190> call[name[self].indexes][name[k]]] begin[:]
call[call[name[self].indexes][name[k]]][name[v]] assign[=] call[name[set], parameter[list[[]]]]
call[call[call[name[self].indexes][name[k]]][name[v]].add, parameter[name[key]]] | keyword[def] identifier[update_index] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[value] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[self] . identifier[indexes] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] , identifier[six] . identifier[string_types] ):
identifier[self] . identifier[index_defs] [ identifier[k] ][ literal[string] ]= literal[string]
keyword[if] identifier[v] keyword[not] keyword[in] identifier[self] . identifier[indexes] [ identifier[k] ]:
identifier[self] . identifier[indexes] [ identifier[k] ][ identifier[v] ]= identifier[set] ([])
identifier[self] . identifier[indexes] [ identifier[k] ][ identifier[v] ]. identifier[add] ( identifier[key] ) | def update_index(self, key, value):
"""Update the index with the new key/values."""
for (k, v) in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy' # depends on [control=['if'], data=[]]
if v not in self.indexes[k]:
self.indexes[k][v] = set([]) # depends on [control=['if'], data=['v']]
self.indexes[k][v].add(key) # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=[]] |
def assertFileExists(self, filename, msg=None):
    '''Fail if ``filename`` does not exist as determined by
    ``os.path.isfile(filename)``.

    Parameters
    ----------
    filename : str, bytes
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.
    '''
    standardMsg = '%s does not exist' % filename
    # isfile() is False for directories and broken symlinks as well as
    # genuinely missing paths.
    if not os.path.isfile(filename):
        self.fail(self._formatMessage(msg, standardMsg)) | def function[assertFileExists, parameter[self, filename, msg]]:
constant[Fail if ``filename`` does not exist as determined by
``os.path.isfile(filename)``.
Parameters
----------
filename : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
]
variable[standardMsg] assign[=] binary_operation[constant[%s does not exist] <ast.Mod object at 0x7da2590d6920> name[filename]]
if <ast.UnaryOp object at 0x7da1b07f73d0> begin[:]
call[name[self].fail, parameter[call[name[self]._formatMessage, parameter[name[msg], name[standardMsg]]]]] | keyword[def] identifier[assertFileExists] ( identifier[self] , identifier[filename] , identifier[msg] = keyword[None] ):
literal[string]
identifier[standardMsg] = literal[string] % identifier[filename]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ):
identifier[self] . identifier[fail] ( identifier[self] . identifier[_formatMessage] ( identifier[msg] , identifier[standardMsg] )) | def assertFileExists(self, filename, msg=None):
"""Fail if ``filename`` does not exist as determined by
``os.path.isfile(filename)``.
Parameters
----------
filename : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
"""
standardMsg = '%s does not exist' % filename
if not os.path.isfile(filename):
self.fail(self._formatMessage(msg, standardMsg)) # depends on [control=['if'], data=[]] |
def _is_deprecated(func):
    """Helper to check if func is wraped by our deprecated decorator"""
    if sys.version_info < (3, 5):
        # The __closure__ introspection below is only supported on 3.5+.
        raise NotImplementedError("This is only available for python3.5 "
                                  "or above")
    closures = getattr(func, '__closure__', [])
    if closures is None:
        # Plain (non-closure) functions have __closure__ set to None.
        closures = []
    # The deprecated decorator keeps its message in a closure cell; scan
    # the string-valued cells for the marker word.
    is_deprecated = ('deprecated' in ''.join([c.cell_contents
                                              for c in closures
                                              if isinstance(c.cell_contents, str)]))
    return is_deprecated | def function[_is_deprecated, parameter[func]]:
constant[Helper to check if func is wraped by our deprecated decorator]
if compare[name[sys].version_info less[<] tuple[[<ast.Constant object at 0x7da1b18ddae0>, <ast.Constant object at 0x7da1b18dc130>]]] begin[:]
<ast.Raise object at 0x7da1b18dfc70>
variable[closures] assign[=] call[name[getattr], parameter[name[func], constant[__closure__], list[[]]]]
if compare[name[closures] is constant[None]] begin[:]
variable[closures] assign[=] list[[]]
variable[is_deprecated] assign[=] compare[constant[deprecated] in call[constant[].join, parameter[<ast.ListComp object at 0x7da1b18df910>]]]
return[name[is_deprecated]] | keyword[def] identifier[_is_deprecated] ( identifier[func] ):
literal[string]
keyword[if] identifier[sys] . identifier[version_info] <( literal[int] , literal[int] ):
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string] )
identifier[closures] = identifier[getattr] ( identifier[func] , literal[string] ,[])
keyword[if] identifier[closures] keyword[is] keyword[None] :
identifier[closures] =[]
identifier[is_deprecated] =( literal[string] keyword[in] literal[string] . identifier[join] ([ identifier[c] . identifier[cell_contents]
keyword[for] identifier[c] keyword[in] identifier[closures]
keyword[if] identifier[isinstance] ( identifier[c] . identifier[cell_contents] , identifier[str] )]))
keyword[return] identifier[is_deprecated] | def _is_deprecated(func):
"""Helper to check if func is wraped by our deprecated decorator"""
if sys.version_info < (3, 5):
raise NotImplementedError('This is only available for python3.5 or above') # depends on [control=['if'], data=[]]
closures = getattr(func, '__closure__', [])
if closures is None:
closures = [] # depends on [control=['if'], data=['closures']]
is_deprecated = 'deprecated' in ''.join([c.cell_contents for c in closures if isinstance(c.cell_contents, str)])
return is_deprecated |
def load(ctx, input, output):
    """Read clusters from file and save to model file."""
    log.debug('chemdataextractor.cluster.load')
    import pickle
    click.echo('Reading %s' % input.name)
    # Each input line is expected to be: <cluster> <word> <frequency>.
    clusters = {}
    for line in input.readlines():
        cluster, word, freq = line.split()
        # freq is intentionally discarded; only the word -> cluster map is kept.
        clusters[word] = cluster
    pickle.dump(clusters, output, protocol=pickle.HIGHEST_PROTOCOL) | def function[load, parameter[ctx, input, output]]:
constant[Read clusters from file and save to model file.]
call[name[log].debug, parameter[constant[chemdataextractor.cluster.load]]]
import module[pickle]
call[name[click].echo, parameter[binary_operation[constant[Reading %s] <ast.Mod object at 0x7da2590d6920> name[input].name]]]
variable[clusters] assign[=] dictionary[[], []]
for taget[name[line]] in starred[call[name[input].readlines, parameter[]]] begin[:]
<ast.Tuple object at 0x7da18dc98340> assign[=] call[name[line].split, parameter[]]
call[name[clusters]][name[word]] assign[=] name[cluster]
call[name[pickle].dump, parameter[name[clusters], name[output]]] | keyword[def] identifier[load] ( identifier[ctx] , identifier[input] , identifier[output] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
keyword[import] identifier[pickle]
identifier[click] . identifier[echo] ( literal[string] % identifier[input] . identifier[name] )
identifier[clusters] ={}
keyword[for] identifier[line] keyword[in] identifier[input] . identifier[readlines] ():
identifier[cluster] , identifier[word] , identifier[freq] = identifier[line] . identifier[split] ()
identifier[clusters] [ identifier[word] ]= identifier[cluster]
identifier[pickle] . identifier[dump] ( identifier[clusters] , identifier[output] , identifier[protocol] = identifier[pickle] . identifier[HIGHEST_PROTOCOL] ) | def load(ctx, input, output):
"""Read clusters from file and save to model file."""
log.debug('chemdataextractor.cluster.load')
import pickle
click.echo('Reading %s' % input.name)
clusters = {}
for line in input.readlines():
(cluster, word, freq) = line.split()
clusters[word] = cluster # depends on [control=['for'], data=['line']]
pickle.dump(clusters, output, protocol=pickle.HIGHEST_PROTOCOL) |
def get(self, sid):
    """
    Constructs a ShortCodeContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.proxy.v1.service.short_code.ShortCodeContext
    :rtype: twilio.rest.proxy.v1.service.short_code.ShortCodeContext
    """
    # No network call is made here; the context is a lazy handle.
    return ShortCodeContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) | def function[get, parameter[self, sid]]:
constant[
Constructs a ShortCodeContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.short_code.ShortCodeContext
:rtype: twilio.rest.proxy.v1.service.short_code.ShortCodeContext
]
return[call[name[ShortCodeContext], parameter[name[self]._version]]] | keyword[def] identifier[get] ( identifier[self] , identifier[sid] ):
literal[string]
keyword[return] identifier[ShortCodeContext] ( identifier[self] . identifier[_version] , identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[sid] = identifier[sid] ,) | def get(self, sid):
"""
Constructs a ShortCodeContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.short_code.ShortCodeContext
:rtype: twilio.rest.proxy.v1.service.short_code.ShortCodeContext
"""
return ShortCodeContext(self._version, service_sid=self._solution['service_sid'], sid=sid) |
def has_subscriber(self, user):
    """ Returns ``True`` if the given user is a subscriber of this topic. """
    if not hasattr(self, '_subscribers'):
        # Cache the subscriber list on first access so repeated membership
        # checks do not re-query the subscribers relation.
        self._subscribers = list(self.subscribers.all())
    return user in self._subscribers | def function[has_subscriber, parameter[self, user]]:
constant[ Returns ``True`` if the given user is a subscriber of this topic. ]
if <ast.UnaryOp object at 0x7da1b12c9600> begin[:]
name[self]._subscribers assign[=] call[name[list], parameter[call[name[self].subscribers.all, parameter[]]]]
return[compare[name[user] in name[self]._subscribers]] | keyword[def] identifier[has_subscriber] ( identifier[self] , identifier[user] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_subscribers] = identifier[list] ( identifier[self] . identifier[subscribers] . identifier[all] ())
keyword[return] identifier[user] keyword[in] identifier[self] . identifier[_subscribers] | def has_subscriber(self, user):
""" Returns ``True`` if the given user is a subscriber of this topic. """
if not hasattr(self, '_subscribers'):
self._subscribers = list(self.subscribers.all()) # depends on [control=['if'], data=[]]
return user in self._subscribers |
def _access_user_info(self):
    """
    Accesses the :attr:`.user_info_url`.

    :returns:
        :class:`.UserInfoResponse`
    """
    # Interpolate the authenticated user's attributes (e.g. id) into the
    # URL template before making the request.
    url = self.user_info_url.format(**self.user.__dict__)
    return self.access(url) | def function[_access_user_info, parameter[self]]:
constant[
Accesses the :attr:`.user_info_url`.
:returns:
:class:`.UserInfoResponse`
]
variable[url] assign[=] call[name[self].user_info_url.format, parameter[]]
return[call[name[self].access, parameter[name[url]]]] | keyword[def] identifier[_access_user_info] ( identifier[self] ):
literal[string]
identifier[url] = identifier[self] . identifier[user_info_url] . identifier[format] (** identifier[self] . identifier[user] . identifier[__dict__] )
keyword[return] identifier[self] . identifier[access] ( identifier[url] ) | def _access_user_info(self):
"""
Accesses the :attr:`.user_info_url`.
:returns:
:class:`.UserInfoResponse`
"""
url = self.user_info_url.format(**self.user.__dict__)
return self.access(url) |
def device_to_user(self, x, y):
    """Transform a coordinate from device space to user space
    by multiplying the given point
    by the inverse of the current transformation matrix (CTM).

    :param x: X position.
    :param y: Y position.
    :type x: float
    :type y: float
    :returns: A ``(user_x, user_y)`` tuple of floats.
    """
    # cairo writes the transformed point back in place, so allocate a
    # two-element C double array and pass a pointer to each slot.
    xy = ffi.new('double[2]', [x, y])
    cairo.cairo_device_to_user(self._pointer, xy + 0, xy + 1)
    self._check_status()
    return tuple(xy) | def function[device_to_user, parameter[self, x, y]]:
constant[Transform a coordinate from device space to user space
by multiplying the given point
by the inverse of the current transformation matrix (CTM).
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(user_x, user_y)`` tuple of floats.
]
variable[xy] assign[=] call[name[ffi].new, parameter[constant[double[2]], list[[<ast.Name object at 0x7da1b100fc40>, <ast.Name object at 0x7da1b100fbb0>]]]]
call[name[cairo].cairo_device_to_user, parameter[name[self]._pointer, binary_operation[name[xy] + constant[0]], binary_operation[name[xy] + constant[1]]]]
call[name[self]._check_status, parameter[]]
return[call[name[tuple], parameter[name[xy]]]] | keyword[def] identifier[device_to_user] ( identifier[self] , identifier[x] , identifier[y] ):
literal[string]
identifier[xy] = identifier[ffi] . identifier[new] ( literal[string] ,[ identifier[x] , identifier[y] ])
identifier[cairo] . identifier[cairo_device_to_user] ( identifier[self] . identifier[_pointer] , identifier[xy] + literal[int] , identifier[xy] + literal[int] )
identifier[self] . identifier[_check_status] ()
keyword[return] identifier[tuple] ( identifier[xy] ) | def device_to_user(self, x, y):
"""Transform a coordinate from device space to user space
by multiplying the given point
by the inverse of the current transformation matrix (CTM).
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(user_x, user_y)`` tuple of floats.
"""
xy = ffi.new('double[2]', [x, y])
cairo.cairo_device_to_user(self._pointer, xy + 0, xy + 1)
self._check_status()
return tuple(xy) |
def ADD(cpu, dest, src):
    """
    Add.

    Computes ``DEST = DEST + SRC`` and stores the result in the destination
    operand. An immediate source operand is sign-extended to the length of
    the destination operand format. ADD does not distinguish signed from
    unsigned operands: the processor evaluates both interpretations and
    sets OF and CF to flag a carry in the signed and unsigned result
    respectively, while SF reflects the sign of the signed result.

    Delegates to the shared add implementation with no carry-in.

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    """
    cpu._ADD(dest, src, carry=False)
constant[
Add.
Adds the first operand (destination operand) and the second operand (source operand)
and stores the result in the destination operand. When an immediate value is used as
an operand, it is sign-extended to the length of the destination operand format.
The ADD instruction does not distinguish between signed or unsigned operands. Instead,
the processor evaluates the result for both data types and sets the OF and CF flags to
indicate a carry in the signed or unsigned result, respectively. The SF flag indicates
the sign of the signed result::
DEST = DEST + SRC;
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
]
call[name[cpu]._ADD, parameter[name[dest], name[src]]] | keyword[def] identifier[ADD] ( identifier[cpu] , identifier[dest] , identifier[src] ):
literal[string]
identifier[cpu] . identifier[_ADD] ( identifier[dest] , identifier[src] , identifier[carry] = keyword[False] ) | def ADD(cpu, dest, src):
"""
Add.
Adds the first operand (destination operand) and the second operand (source operand)
and stores the result in the destination operand. When an immediate value is used as
an operand, it is sign-extended to the length of the destination operand format.
The ADD instruction does not distinguish between signed or unsigned operands. Instead,
the processor evaluates the result for both data types and sets the OF and CF flags to
indicate a carry in the signed or unsigned result, respectively. The SF flag indicates
the sign of the signed result::
DEST = DEST + SRC;
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
"""
cpu._ADD(dest, src, carry=False) |
def _match_item(item, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
    """Match items by metadata.

    Note:
        Metadata values are lowercased when ``normalize_values`` is ``True``,
        so ``ignore_case`` is automatically set to ``True``.

    Parameters:
        item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
        any_all (callable): A callable to determine if any or all filters must match to match item.
            Expected values :obj:`any` (default) or :obj:`all`.
        ignore_case (bool): Perform case-insensitive matching.
            Default: ``False``
        normalize_values (bool): Normalize metadata values to remove common differences between sources.
            Default: ``False``
        kwargs (list): Lists of values to match the given metadata field.

    Returns:
        bool: True if matched, False if not.
    """
    tags = get_item_tags(item)
    # One lazy match per (field, pattern) pair; any_all decides whether a
    # single hit or every hit is required.
    field_matches = (
        _match_field(
            get_field(tags, field),
            pattern,
            ignore_case=ignore_case,
            normalize_values=normalize_values,
        )
        for field, patterns in kwargs.items()
        for pattern in patterns
    )
    return any_all(field_matches)
constant[Match items by metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
any_all (callable): A callable to determine if any or all filters must match to match item.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Returns:
bool: True if matched, False if not.
]
variable[it] assign[=] call[name[get_item_tags], parameter[name[item]]]
return[call[name[any_all], parameter[<ast.GeneratorExp object at 0x7da1b2369bd0>]]] | keyword[def] identifier[_match_item] ( identifier[item] , identifier[any_all] = identifier[any] , identifier[ignore_case] = keyword[False] , identifier[normalize_values] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[it] = identifier[get_item_tags] ( identifier[item] )
keyword[return] identifier[any_all] (
identifier[_match_field] (
identifier[get_field] ( identifier[it] , identifier[field] ), identifier[pattern] , identifier[ignore_case] = identifier[ignore_case] , identifier[normalize_values] = identifier[normalize_values]
) keyword[for] identifier[field] , identifier[patterns] keyword[in] identifier[kwargs] . identifier[items] () keyword[for] identifier[pattern] keyword[in] identifier[patterns]
) | def _match_item(item, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Match items by metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
any_all (callable): A callable to determine if any or all filters must match to match item.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Returns:
bool: True if matched, False if not.
"""
it = get_item_tags(item)
return any_all((_match_field(get_field(it, field), pattern, ignore_case=ignore_case, normalize_values=normalize_values) for (field, patterns) in kwargs.items() for pattern in patterns)) |
def dht_findprovs(self, multihash, *multihashes, **kwargs):
    """Finds peers in the DHT that can provide a specific value.

    .. code-block:: python

        >>> c.dht_findprovs("QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQu … mpW2")
        [{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
          'Extra': '', 'Type': 6, 'Responses': None},
         …
         {'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
          'Extra': '', 'Type': 1, 'Responses': [
            {'ID': 'QmSHXfsmN3ZduwFDjeqBn1C8b1tcLkxK6yd … waXw', 'Addrs': [
                '/ip4/127.0.0.1/tcp/4001',
                '/ip4/52.32.109.74/tcp/1028'
            ]}
         ]}]

    One or more DHT keys may be given; all of them are forwarded to the
    daemon's ``/dht/findprovs`` endpoint in a single request.

    Parameters
    ----------
    multihash : str
        The DHT key to find providers for

    Returns
    -------
        dict : List of provider Peer IDs
    """
    keys = (multihash,) + multihashes
    return self._client.request('/dht/findprovs', keys,
                                decoder='json', **kwargs)
constant[Finds peers in the DHT that can provide a specific value.
.. code-block:: python
>>> c.dht_findprovs("QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQu … mpW2")
[{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
'Extra': '', 'Type': 6, 'Responses': None},
{'ID': 'QmaK6Aj5WXkfnWGoWq7V8pGUYzcHPZp4jKQ5JtmRvSzQGk',
'Extra': '', 'Type': 6, 'Responses': None},
{'ID': 'QmdUdLu8dNvr4MVW1iWXxKoQrbG6y1vAVWPdkeGK4xppds',
'Extra': '', 'Type': 6, 'Responses': None},
…
{'ID': '', 'Extra': '', 'Type': 4, 'Responses': [
{'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97Mk … E9Uc', 'Addrs': None}
]},
{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
'Extra': '', 'Type': 1, 'Responses': [
{'ID': 'QmSHXfsmN3ZduwFDjeqBn1C8b1tcLkxK6yd … waXw', 'Addrs': [
'/ip4/127.0.0.1/tcp/4001',
'/ip4/172.17.0.8/tcp/4001',
'/ip6/::1/tcp/4001',
'/ip4/52.32.109.74/tcp/1028'
]}
]}]
Parameters
----------
multihash : str
The DHT key to find providers for
Returns
-------
dict : List of provider Peer IDs
]
variable[args] assign[=] binary_operation[tuple[[<ast.Name object at 0x7da20c6c5450>]] + name[multihashes]]
return[call[name[self]._client.request, parameter[constant[/dht/findprovs], name[args]]]] | keyword[def] identifier[dht_findprovs] ( identifier[self] , identifier[multihash] ,* identifier[multihashes] ,** identifier[kwargs] ):
literal[string]
identifier[args] =( identifier[multihash] ,)+ identifier[multihashes]
keyword[return] identifier[self] . identifier[_client] . identifier[request] ( literal[string] , identifier[args] ,
identifier[decoder] = literal[string] ,** identifier[kwargs] ) | def dht_findprovs(self, multihash, *multihashes, **kwargs):
"""Finds peers in the DHT that can provide a specific value.
.. code-block:: python
>>> c.dht_findprovs("QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQu … mpW2")
[{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
'Extra': '', 'Type': 6, 'Responses': None},
{'ID': 'QmaK6Aj5WXkfnWGoWq7V8pGUYzcHPZp4jKQ5JtmRvSzQGk',
'Extra': '', 'Type': 6, 'Responses': None},
{'ID': 'QmdUdLu8dNvr4MVW1iWXxKoQrbG6y1vAVWPdkeGK4xppds',
'Extra': '', 'Type': 6, 'Responses': None},
…
{'ID': '', 'Extra': '', 'Type': 4, 'Responses': [
{'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97Mk … E9Uc', 'Addrs': None}
]},
{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
'Extra': '', 'Type': 1, 'Responses': [
{'ID': 'QmSHXfsmN3ZduwFDjeqBn1C8b1tcLkxK6yd … waXw', 'Addrs': [
'/ip4/127.0.0.1/tcp/4001',
'/ip4/172.17.0.8/tcp/4001',
'/ip6/::1/tcp/4001',
'/ip4/52.32.109.74/tcp/1028'
]}
]}]
Parameters
----------
multihash : str
The DHT key to find providers for
Returns
-------
dict : List of provider Peer IDs
"""
args = (multihash,) + multihashes
return self._client.request('/dht/findprovs', args, decoder='json', **kwargs) |
def get_fresh_content(top=4, additional=10, featured=False):
    """Return the freshest published content across articles, galleries and video.

    Requires the ``articles``, ``photos`` and ``video`` apps to be installed.
    Collects published articles, galleries and videos (optionally limited to
    featured items), merges them newest-first by ``created``, and splits the
    merged list into a leading "top" group and a trailing "additional" group.

    Usage::

        {% get_fresh_content 5 10 %}

    Would return five top objects and 10 additional.

        {% get_fresh_content 4 8 featured %}

    Would return four top objects and 8 additional, limited to featured content.

    :param top: number of items in the ``top_content`` group.
    :param additional: number of items in the ``additional_content`` group.
    :param featured: if True, restrict every queryset to featured items only.
    :returns: context dict with keys:

        ``top_content``
            the newest ``top`` items (mixed articles/galleries/videos)
        ``additional_content``
            the next ``additional`` items after the top group
        ``MEDIA_URL``
            ``settings.MEDIA_URL``, for template convenience
    """
    # Imported lazily so the tag module can load even when these optional
    # apps are absent (the tag itself still requires them).
    from articles.models import Article
    from photos.models import Gallery
    from video.models import Video

    articles = Article.published.only('title', 'summary', 'slug', 'created')
    galleries = Gallery.published.only('title', 'summary', 'slug', 'created')
    videos = Video.published.only('title', 'summary', 'slug', 'created')
    if featured:
        articles = articles.filter(featured=True)
        galleries = galleries.filter(featured=True)
        videos = videos.filter(featured=True)

    # Slice each queryset to the maximum number of items that could possibly
    # be shown before merging, so we never materialize whole tables.
    max_total = top + additional
    articles = list(articles[:max_total])
    galleries = list(galleries[:max_total])
    videos = list(videos[:max_total])

    # Merge all three types and order newest-first in a single pass.
    content = chain(articles, galleries, videos)
    content = sorted(content, key=lambda instance: instance.created,
                     reverse=True)

    return {
        'top_content': content[:top],
        'additional_content': content[top:max_total],
        'MEDIA_URL': settings.MEDIA_URL,
    }
constant[
Requires articles, photos and video packages to be installed.
Returns published *Featured* content (articles, galleries, video, etc)
and an additional batch of fresh regular (featured or not) content.
The number of objects returned is defined when the tag is called.
The top item type is defined in the sites admin for sites that
have the supersites app enabled.
If "featured" is True, will limit to only featured content.
Usage::
{% get_fresh_content 5 10 %}
Would return five top objects and 10 additional
{% get_fresh_content 4 8 featured %}
Would return four top objects and 8 additional, limited to featured content.
What you get::
'top_item': the top featured item
'top_item_type': the content type for the top item (article, gallery, video)
'featured': Additional featured items.
If you asked for 5 featureed items,
there will be four -- five minus the one that's in `top_item`.
'articles': featured articles, minus the top item
'galleries': featured galleries, minus the top item
'vids': featured video, minus the top item,
'more_articles': A stack of articles, excluding what's in featured,
sliced to the number passed for <num_regular>,
'more_galleries': A stack of galleries, excluding what's in featured,
sliced to the number passed for <num_regular>,
'additional': A mixed list of articles and galleries, excluding what's in featured,
sliced to the number passed for <num_regular>,
]
from relative_module[articles.models] import module[Article]
from relative_module[photos.models] import module[Gallery]
from relative_module[video.models] import module[Video]
variable[articles] assign[=] call[name[Article].published.only, parameter[constant[title], constant[summary], constant[slug], constant[created]]]
variable[galleries] assign[=] call[name[Gallery].published.only, parameter[constant[title], constant[summary], constant[slug], constant[created]]]
variable[videos] assign[=] call[name[Video].published.only, parameter[constant[title], constant[summary], constant[slug], constant[created]]]
if name[featured] begin[:]
variable[articles] assign[=] call[name[articles].filter, parameter[]]
variable[galleries] assign[=] call[name[galleries].filter, parameter[]]
variable[videos] assign[=] call[name[videos].filter, parameter[]]
variable[max_total] assign[=] binary_operation[name[top] + name[additional]]
variable[articles] assign[=] call[name[list], parameter[call[name[articles]][<ast.Slice object at 0x7da1b146eda0>]]]
variable[galleries] assign[=] call[name[list], parameter[call[name[galleries]][<ast.Slice object at 0x7da1b146e590>]]]
variable[videos] assign[=] call[name[list], parameter[call[name[videos]][<ast.Slice object at 0x7da1b146ec20>]]]
variable[content] assign[=] call[name[chain], parameter[name[articles], name[galleries], name[videos]]]
variable[content] assign[=] call[name[sorted], parameter[name[content]]]
call[name[content].reverse, parameter[]]
variable[top_content] assign[=] call[name[content]][<ast.Slice object at 0x7da1b1437550>]
variable[additional_content] assign[=] call[name[content]][<ast.Slice object at 0x7da1b1437dc0>]
return[dictionary[[<ast.Constant object at 0x7da1b14348e0>, <ast.Constant object at 0x7da1b14341f0>, <ast.Constant object at 0x7da1b1434f10>], [<ast.Name object at 0x7da1b1437bb0>, <ast.Name object at 0x7da1b1435720>, <ast.Attribute object at 0x7da1b1437580>]]] | keyword[def] identifier[get_fresh_content] ( identifier[top] = literal[int] , identifier[additional] = literal[int] , identifier[featured] = keyword[False] ):
literal[string]
keyword[from] identifier[articles] . identifier[models] keyword[import] identifier[Article]
keyword[from] identifier[photos] . identifier[models] keyword[import] identifier[Gallery]
keyword[from] identifier[video] . identifier[models] keyword[import] identifier[Video]
identifier[articles] = identifier[Article] . identifier[published] . identifier[only] ( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[galleries] = identifier[Gallery] . identifier[published] . identifier[only] ( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[videos] = identifier[Video] . identifier[published] . identifier[only] ( literal[string] , literal[string] , literal[string] , literal[string] )
keyword[if] identifier[featured] :
identifier[articles] = identifier[articles] . identifier[filter] ( identifier[featured] = keyword[True] )
identifier[galleries] = identifier[galleries] . identifier[filter] ( identifier[featured] = keyword[True] )
identifier[videos] = identifier[videos] . identifier[filter] ( identifier[featured] = keyword[True] )
identifier[max_total] = identifier[top] + identifier[additional]
identifier[articles] = identifier[list] ( identifier[articles] [: identifier[max_total] ])
identifier[galleries] = identifier[list] ( identifier[galleries] [: identifier[max_total] ])
identifier[videos] = identifier[list] ( identifier[videos] [: identifier[max_total] ])
identifier[content] = identifier[chain] ( identifier[articles] , identifier[galleries] , identifier[videos] )
identifier[content] = identifier[sorted] ( identifier[content] , identifier[key] = keyword[lambda] identifier[instance] : identifier[instance] . identifier[created] )
identifier[content] . identifier[reverse] ()
identifier[top_content] = identifier[content] [: identifier[top] ]
identifier[additional_content] = identifier[content] [ identifier[top] : identifier[max_total] ]
keyword[return] {
literal[string] : identifier[top_content] ,
literal[string] : identifier[additional_content] ,
literal[string] : identifier[settings] . identifier[MEDIA_URL] ,
} | def get_fresh_content(top=4, additional=10, featured=False):
"""
Requires articles, photos and video packages to be installed.
Returns published *Featured* content (articles, galleries, video, etc)
and an additional batch of fresh regular (featured or not) content.
The number of objects returned is defined when the tag is called.
The top item type is defined in the sites admin for sites that
have the supersites app enabled.
If "featured" is True, will limit to only featured content.
Usage::
{% get_fresh_content 5 10 %}
Would return five top objects and 10 additional
{% get_fresh_content 4 8 featured %}
Would return four top objects and 8 additional, limited to featured content.
What you get::
'top_item': the top featured item
'top_item_type': the content type for the top item (article, gallery, video)
'featured': Additional featured items.
If you asked for 5 featureed items,
there will be four -- five minus the one that's in `top_item`.
'articles': featured articles, minus the top item
'galleries': featured galleries, minus the top item
'vids': featured video, minus the top item,
'more_articles': A stack of articles, excluding what's in featured,
sliced to the number passed for <num_regular>,
'more_galleries': A stack of galleries, excluding what's in featured,
sliced to the number passed for <num_regular>,
'additional': A mixed list of articles and galleries, excluding what's in featured,
sliced to the number passed for <num_regular>,
"""
from articles.models import Article
from photos.models import Gallery
from video.models import Video
articles = Article.published.only('title', 'summary', 'slug', 'created')
galleries = Gallery.published.only('title', 'summary', 'slug', 'created')
videos = Video.published.only('title', 'summary', 'slug', 'created')
if featured:
articles = articles.filter(featured=True)
galleries = galleries.filter(featured=True)
videos = videos.filter(featured=True) # depends on [control=['if'], data=[]]
# now slice to maximum possible for each group
# and go ahead and make them lists for chaining
max_total = top + additional
articles = list(articles[:max_total])
galleries = list(galleries[:max_total])
videos = list(videos[:max_total])
# chain the lists now
content = chain(articles, galleries, videos)
content = sorted(content, key=lambda instance: instance.created)
content.reverse()
top_content = content[:top]
additional_content = content[top:max_total]
return {'top_content': top_content, 'additional_content': additional_content, 'MEDIA_URL': settings.MEDIA_URL} |
def interm_fluent_variables(self) -> FluentParamsList:
    '''Returns the instantiated intermediate fluents in canonical order.

    Returns:
        Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
        and a list of instantiated fluents represented as strings.
    '''
    # Pair the domain's intermediate fluents with their canonical ordering
    # and delegate instantiation to the shared helper.
    return self._fluent_params(
        self.domain.intermediate_fluents,
        self.domain.interm_fluent_ordering)
constant[Returns the instantiated intermediate fluents in canonical order.
Returns:
Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
and a list of instantiated fluents represented as strings.
]
variable[fluents] assign[=] name[self].domain.intermediate_fluents
variable[ordering] assign[=] name[self].domain.interm_fluent_ordering
return[call[name[self]._fluent_params, parameter[name[fluents], name[ordering]]]] | keyword[def] identifier[interm_fluent_variables] ( identifier[self] )-> identifier[FluentParamsList] :
literal[string]
identifier[fluents] = identifier[self] . identifier[domain] . identifier[intermediate_fluents]
identifier[ordering] = identifier[self] . identifier[domain] . identifier[interm_fluent_ordering]
keyword[return] identifier[self] . identifier[_fluent_params] ( identifier[fluents] , identifier[ordering] ) | def interm_fluent_variables(self) -> FluentParamsList:
"""Returns the instantiated intermediate fluents in canonical order.
Returns:
Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
and a list of instantiated fluents represented as strings.
"""
fluents = self.domain.intermediate_fluents
ordering = self.domain.interm_fluent_ordering
return self._fluent_params(fluents, ordering) |
def _get_unit_factor(cls, unit):
"""
Returns the unit factor depending on the unit constant
:param int unit: the unit of the factor requested
:returns: a function to convert the raw sensor value to the given unit
:rtype: lambda function
:raises UnsupportedUnitError: if the unit is not supported
"""
try:
if isinstance(unit, str):
unit = cls.UNIT_FACTOR_NAMES[unit]
return cls.UNIT_FACTORS[unit]
except KeyError:
raise UnsupportedUnitError() | def function[_get_unit_factor, parameter[cls, unit]]:
constant[
Returns the unit factor depending on the unit constant
:param int unit: the unit of the factor requested
:returns: a function to convert the raw sensor value to the given unit
:rtype: lambda function
:raises UnsupportedUnitError: if the unit is not supported
]
<ast.Try object at 0x7da1b2293580> | keyword[def] identifier[_get_unit_factor] ( identifier[cls] , identifier[unit] ):
literal[string]
keyword[try] :
keyword[if] identifier[isinstance] ( identifier[unit] , identifier[str] ):
identifier[unit] = identifier[cls] . identifier[UNIT_FACTOR_NAMES] [ identifier[unit] ]
keyword[return] identifier[cls] . identifier[UNIT_FACTORS] [ identifier[unit] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[UnsupportedUnitError] () | def _get_unit_factor(cls, unit):
"""
Returns the unit factor depending on the unit constant
:param int unit: the unit of the factor requested
:returns: a function to convert the raw sensor value to the given unit
:rtype: lambda function
:raises UnsupportedUnitError: if the unit is not supported
"""
try:
if isinstance(unit, str):
unit = cls.UNIT_FACTOR_NAMES[unit] # depends on [control=['if'], data=[]]
return cls.UNIT_FACTORS[unit] # depends on [control=['try'], data=[]]
except KeyError:
raise UnsupportedUnitError() # depends on [control=['except'], data=[]] |
def _new_redis_client(self):
    """Create a new redis client and assign it the class _redis_client
    attribute for reuse across requests.

    :rtype: tornadoredis.Client()
    """
    # Import unconditionally. The previous ``if 'tornadoredis' not in
    # globals()`` guard was a bug: the in-function import makes
    # ``tornadoredis`` a *local* name for the whole function body, so
    # whenever the module WAS already in globals() the import was skipped
    # and the ``tornadoredis.Client`` call below raised UnboundLocalError.
    # A repeated import is effectively free thanks to the sys.modules cache.
    import tornadoredis
    kwargs = self._redis_connection_settings()
    LOGGER.info('Connecting to %(host)s:%(port)s DB %(selected_db)s',
                kwargs)
    return tornadoredis.Client(**kwargs)
constant[Create a new redis client and assign it the class _redis_client
attribute for reuse across requests.
:rtype: tornadoredis.Client()
]
if compare[constant[tornadoredis] <ast.NotIn object at 0x7da2590d7190> call[name[globals], parameter[]]] begin[:]
import module[tornadoredis]
variable[kwargs] assign[=] call[name[self]._redis_connection_settings, parameter[]]
call[name[LOGGER].info, parameter[constant[Connecting to %(host)s:%(port)s DB %(selected_db)s], name[kwargs]]]
return[call[name[tornadoredis].Client, parameter[]]] | keyword[def] identifier[_new_redis_client] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[globals] ():
keyword[import] identifier[tornadoredis]
identifier[kwargs] = identifier[self] . identifier[_redis_connection_settings] ()
identifier[LOGGER] . identifier[info] ( literal[string] ,
identifier[kwargs] )
keyword[return] identifier[tornadoredis] . identifier[Client] (** identifier[kwargs] ) | def _new_redis_client(self):
"""Create a new redis client and assign it the class _redis_client
attribute for reuse across requests.
:rtype: tornadoredis.Client()
"""
if 'tornadoredis' not in globals():
import tornadoredis # depends on [control=['if'], data=[]]
kwargs = self._redis_connection_settings()
LOGGER.info('Connecting to %(host)s:%(port)s DB %(selected_db)s', kwargs)
return tornadoredis.Client(**kwargs) |
def iterate_similarity_datasets(args):
    """Generator over all similarity evaluation datasets.

    Yields ``(dataset_name, kwargs, dataset)`` triples: the dataset name,
    the keyword arguments used to create it, and the created dataset —
    one triple per parameter combination of every requested dataset.
    """
    for name in args.similarity_datasets:
        parameters = nlp.data.list_datasets(name)
        keys = list(parameters.keys())
        # Cartesian product over all parameter value lists, i.e. every
        # possible configuration of this dataset.
        for combination in itertools.product(*parameters.values()):
            kwargs = dict(zip(keys, combination))
            yield name, kwargs, nlp.data.create(name, **kwargs)
constant[Generator over all similarity evaluation datasets.
Iterates over dataset names, keyword arguments for their creation and the
created dataset.
]
for taget[name[dataset_name]] in starred[name[args].similarity_datasets] begin[:]
variable[parameters] assign[=] call[name[nlp].data.list_datasets, parameter[name[dataset_name]]]
for taget[name[key_values]] in starred[call[name[itertools].product, parameter[<ast.Starred object at 0x7da1b21e3f10>]]] begin[:]
variable[kwargs] assign[=] call[name[dict], parameter[call[name[zip], parameter[call[name[parameters].keys, parameter[]], name[key_values]]]]]
<ast.Yield object at 0x7da1b21e3940> | keyword[def] identifier[iterate_similarity_datasets] ( identifier[args] ):
literal[string]
keyword[for] identifier[dataset_name] keyword[in] identifier[args] . identifier[similarity_datasets] :
identifier[parameters] = identifier[nlp] . identifier[data] . identifier[list_datasets] ( identifier[dataset_name] )
keyword[for] identifier[key_values] keyword[in] identifier[itertools] . identifier[product] (* identifier[parameters] . identifier[values] ()):
identifier[kwargs] = identifier[dict] ( identifier[zip] ( identifier[parameters] . identifier[keys] (), identifier[key_values] ))
keyword[yield] identifier[dataset_name] , identifier[kwargs] , identifier[nlp] . identifier[data] . identifier[create] ( identifier[dataset_name] ,** identifier[kwargs] ) | def iterate_similarity_datasets(args):
"""Generator over all similarity evaluation datasets.
Iterates over dataset names, keyword arguments for their creation and the
created dataset.
"""
for dataset_name in args.similarity_datasets:
parameters = nlp.data.list_datasets(dataset_name)
for key_values in itertools.product(*parameters.values()):
kwargs = dict(zip(parameters.keys(), key_values))
yield (dataset_name, kwargs, nlp.data.create(dataset_name, **kwargs)) # depends on [control=['for'], data=['key_values']] # depends on [control=['for'], data=['dataset_name']] |
def properties_mutator(cls, name, ioper=False):
    """Wraps a mutating container method to add HasProperties notifications

    If the container is not part of a HasProperties instance, behavior
    is unchanged. However, if it is part of a HasProperties instance
    the new method calls set, triggering change notifications.
    """

    def wrapper(self, *args, **kwargs):
        """Mutate if not part of HasProperties; copy/modify/set otherwise"""
        instance = getattr(self, '_instance', None)
        prop_name = getattr(self, '_name', '')
        # "Detached" means this container is not (or no longer) the value
        # held by a HasProperties instance, so mutate in place as usual.
        detached = (
            instance is None
            or prop_name == ''
            or self is not getattr(instance, prop_name)
        )
        if detached:
            return getattr(super(cls, self), name)(*args, **kwargs)
        # Attached: mutate a copy, then assign it back through the property
        # so HasProperties change notifications fire.
        working = cls(self)
        result = getattr(working, name)(*args, **kwargs)
        if not ioper:
            setattr(self._instance, self._name, working)
        self._instance = None
        self._name = ''
        return result

    wrapped = getattr(cls, name)
    wrapper.__name__ = wrapped.__name__
    wrapper.__doc__ = wrapped.__doc__
    return wrapper
constant[Wraps a mutating container method to add HasProperties notifications
If the container is not part of a HasProperties instance, behavior
is unchanged. However, if it is part of a HasProperties instance
the new method calls set, triggering change notifications.
]
def function[wrapper, parameter[self]]:
constant[Mutate if not part of HasProperties; copy/modify/set otherwise]
if <ast.BoolOp object at 0x7da1b050b2e0> begin[:]
return[call[call[name[getattr], parameter[call[name[super], parameter[name[cls], name[self]]], name[name]]], parameter[<ast.Starred object at 0x7da1b050b2b0>]]]
variable[copy] assign[=] call[name[cls], parameter[name[self]]]
variable[val] assign[=] call[call[name[getattr], parameter[name[copy], name[name]]], parameter[<ast.Starred object at 0x7da1b050b790>]]
if <ast.UnaryOp object at 0x7da1b050b070> begin[:]
call[name[setattr], parameter[name[self]._instance, name[self]._name, name[copy]]]
name[self]._instance assign[=] constant[None]
name[self]._name assign[=] constant[]
return[name[val]]
variable[wrapped] assign[=] call[name[getattr], parameter[name[cls], name[name]]]
name[wrapper].__name__ assign[=] name[wrapped].__name__
name[wrapper].__doc__ assign[=] name[wrapped].__doc__
return[name[wrapper]] | keyword[def] identifier[properties_mutator] ( identifier[cls] , identifier[name] , identifier[ioper] = keyword[False] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] (
identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[is] keyword[None] keyword[or]
identifier[getattr] ( identifier[self] , literal[string] , literal[string] )== literal[string] keyword[or]
identifier[self] keyword[is] keyword[not] identifier[getattr] ( identifier[self] . identifier[_instance] , identifier[self] . identifier[_name] )
):
keyword[return] identifier[getattr] ( identifier[super] ( identifier[cls] , identifier[self] ), identifier[name] )(* identifier[args] ,** identifier[kwargs] )
identifier[copy] = identifier[cls] ( identifier[self] )
identifier[val] = identifier[getattr] ( identifier[copy] , identifier[name] )(* identifier[args] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[ioper] :
identifier[setattr] ( identifier[self] . identifier[_instance] , identifier[self] . identifier[_name] , identifier[copy] )
identifier[self] . identifier[_instance] = keyword[None]
identifier[self] . identifier[_name] = literal[string]
keyword[return] identifier[val]
identifier[wrapped] = identifier[getattr] ( identifier[cls] , identifier[name] )
identifier[wrapper] . identifier[__name__] = identifier[wrapped] . identifier[__name__]
identifier[wrapper] . identifier[__doc__] = identifier[wrapped] . identifier[__doc__]
keyword[return] identifier[wrapper] | def properties_mutator(cls, name, ioper=False):
"""Wraps a mutating container method to add HasProperties notifications
If the container is not part of a HasProperties instance, behavior
is unchanged. However, if it is part of a HasProperties instance
the new method calls set, triggering change notifications.
"""
def wrapper(self, *args, **kwargs):
"""Mutate if not part of HasProperties; copy/modify/set otherwise"""
if getattr(self, '_instance', None) is None or getattr(self, '_name', '') == '' or self is not getattr(self._instance, self._name):
return getattr(super(cls, self), name)(*args, **kwargs) # depends on [control=['if'], data=[]]
copy = cls(self)
val = getattr(copy, name)(*args, **kwargs)
if not ioper:
setattr(self._instance, self._name, copy) # depends on [control=['if'], data=[]]
self._instance = None
self._name = ''
return val
wrapped = getattr(cls, name)
wrapper.__name__ = wrapped.__name__
wrapper.__doc__ = wrapped.__doc__
return wrapper |
def unique_scene_labels(scene_list):
    """Return the unique scene labels found in *scene_list*, sorted alphabetically.

    Parameters
    ----------
    scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Scene items; each dict may carry a 'scene_label' key.

    Returns
    -------
    list of str
        Unique labels in alphabetical order.
    """
    if isinstance(scene_list, dcase_util.containers.MetaDataContainer):
        # The container already tracks its own unique labels.
        return scene_list.unique_scene_labels

    seen = []
    for entry in scene_list:
        if 'scene_label' not in entry:
            continue
        label = entry['scene_label']
        if label not in seen:
            seen.append(label)
    seen.sort()
    return seen
constant[Find the unique scene labels
Parameters
----------
scene_list : list, shape=(n,)
A list containing scene dicts
Returns
-------
labels: list, shape=(n,)
Unique labels in alphabetical order
]
if call[name[isinstance], parameter[name[scene_list], name[dcase_util].containers.MetaDataContainer]] begin[:]
return[name[scene_list].unique_scene_labels] | keyword[def] identifier[unique_scene_labels] ( identifier[scene_list] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[scene_list] , identifier[dcase_util] . identifier[containers] . identifier[MetaDataContainer] ):
keyword[return] identifier[scene_list] . identifier[unique_scene_labels]
keyword[else] :
identifier[labels] =[]
keyword[for] identifier[item] keyword[in] identifier[scene_list] :
keyword[if] literal[string] keyword[in] identifier[item] keyword[and] identifier[item] [ literal[string] ] keyword[not] keyword[in] identifier[labels] :
identifier[labels] . identifier[append] ( identifier[item] [ literal[string] ])
identifier[labels] . identifier[sort] ()
keyword[return] identifier[labels] | def unique_scene_labels(scene_list):
"""Find the unique scene labels
Parameters
----------
scene_list : list, shape=(n,)
A list containing scene dicts
Returns
-------
labels: list, shape=(n,)
Unique labels in alphabetical order
"""
if isinstance(scene_list, dcase_util.containers.MetaDataContainer):
return scene_list.unique_scene_labels # depends on [control=['if'], data=[]]
else:
labels = []
for item in scene_list:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
labels.sort()
return labels |
def playlist_songs_move(
	self,
	playlist_songs,
	*,
	after=None,
	before=None,
	index=None,
	position=None
):
	"""Move songs in a playlist.

	Note:
		* Provide no optional arguments to move to end.
		* Provide playlist song dicts for ``after`` and/or ``before``.
		* Provide a zero-based ``index``.
		* Provide a one-based ``position``.

		Songs are inserted *at* given index or position.
		It's also possible to move to the end by using
		``len(songs)`` for index or ``len(songs) + 1`` for position.

	Parameters:
		playlist_songs (list): A list of playlist song dicts.
		after (dict, Optional): A playlist song dict ``songs`` will follow.
		before (dict, Optional): A playlist song dict ``songs`` will precede.
		index (int, Optional): The zero-based index position to insert ``songs``.
		position (int, Optional): The one-based position to insert ``songs``.

	Returns:
		dict: Playlist dict including songs.
	"""

	# Entries can only be re-anchored within one playlist; reject mixed
	# input before making any server calls.
	if not more_itertools.all_equal(
		playlist_song['playlistId']
		for playlist_song in playlist_songs
	):
		raise ValueError(
			"All 'playlist_songs' must be from the same playlist."
		)

	# Re-fetch the playlist with its tracks so the insertion point can be
	# resolved against the current server-side ordering.
	playlist = self.playlist(
		playlist_songs[0]['playlistId'],
		include_songs=True
	)

	# Resolve the entries that will precede (prev) and follow (next_) the
	# moved songs from whichever of after/before/index/position was given.
	prev, next_ = get_ple_prev_next(
		playlist['tracks'],
		after=after,
		before=before,
		index=index,
		position=position
	)

	playlist_songs_len = len(playlist_songs)
	for i, playlist_song in enumerate(playlist_songs):
		# Anchor each song between the previously placed entry and the
		# fixed following entry, preserving the order of playlist_songs.
		mutation = mc_calls.PlaylistEntriesBatch.update(
			playlist_song,
			preceding_entry_id=prev.get('id'),
			following_entry_id=next_.get('id')
		)
		response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
		result = response.body['mutate_response'][0]

		# TODO: Proper exception on failure.
		if result['response_code'] != 'OK':
			break

		if i < playlist_songs_len - 1:
			# NOTE(review): polls until the just-moved entry is readable so it
			# can anchor the next song -- presumably the server applies the
			# mutation asynchronously. There is no backoff or timeout; confirm
			# this cannot spin indefinitely if the entry never appears.
			while True:
				prev = self.playlist_song(result['id'])

				if prev:
					break

	# Return the playlist in its final, post-move state.
	return self.playlist(playlist_songs[0]['playlistId'], include_songs=True)
constant[Move songs in a playlist.
Note:
* Provide no optional arguments to move to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to move to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
playlist_songs (list): A list of playlist song dicts.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``songs``.
position (int, Optional): The one-based position to insert ``songs``.
Returns:
dict: Playlist dict including songs.
]
if <ast.UnaryOp object at 0x7da204345bd0> begin[:]
<ast.Raise object at 0x7da2043474f0>
variable[playlist] assign[=] call[name[self].playlist, parameter[call[call[name[playlist_songs]][constant[0]]][constant[playlistId]]]]
<ast.Tuple object at 0x7da2043449d0> assign[=] call[name[get_ple_prev_next], parameter[call[name[playlist]][constant[tracks]]]]
variable[playlist_songs_len] assign[=] call[name[len], parameter[name[playlist_songs]]]
for taget[tuple[[<ast.Name object at 0x7da18ede48e0>, <ast.Name object at 0x7da18ede7cd0>]]] in starred[call[name[enumerate], parameter[name[playlist_songs]]]] begin[:]
variable[mutation] assign[=] call[name[mc_calls].PlaylistEntriesBatch.update, parameter[name[playlist_song]]]
variable[response] assign[=] call[name[self]._call, parameter[name[mc_calls].PlaylistEntriesBatch, name[mutation]]]
variable[result] assign[=] call[call[name[response].body][constant[mutate_response]]][constant[0]]
if compare[call[name[result]][constant[response_code]] not_equal[!=] constant[OK]] begin[:]
break
if compare[name[i] less[<] binary_operation[name[playlist_songs_len] - constant[1]]] begin[:]
while constant[True] begin[:]
variable[prev] assign[=] call[name[self].playlist_song, parameter[call[name[result]][constant[id]]]]
if name[prev] begin[:]
break
return[call[name[self].playlist, parameter[call[call[name[playlist_songs]][constant[0]]][constant[playlistId]]]]] | keyword[def] identifier[playlist_songs_move] (
identifier[self] ,
identifier[playlist_songs] ,
*,
identifier[after] = keyword[None] ,
identifier[before] = keyword[None] ,
identifier[index] = keyword[None] ,
identifier[position] = keyword[None]
):
literal[string]
keyword[if] keyword[not] identifier[more_itertools] . identifier[all_equal] (
identifier[playlist_song] [ literal[string] ]
keyword[for] identifier[playlist_song] keyword[in] identifier[playlist_songs]
):
keyword[raise] identifier[ValueError] (
literal[string]
)
identifier[playlist] = identifier[self] . identifier[playlist] (
identifier[playlist_songs] [ literal[int] ][ literal[string] ],
identifier[include_songs] = keyword[True]
)
identifier[prev] , identifier[next_] = identifier[get_ple_prev_next] (
identifier[playlist] [ literal[string] ],
identifier[after] = identifier[after] ,
identifier[before] = identifier[before] ,
identifier[index] = identifier[index] ,
identifier[position] = identifier[position]
)
identifier[playlist_songs_len] = identifier[len] ( identifier[playlist_songs] )
keyword[for] identifier[i] , identifier[playlist_song] keyword[in] identifier[enumerate] ( identifier[playlist_songs] ):
identifier[mutation] = identifier[mc_calls] . identifier[PlaylistEntriesBatch] . identifier[update] (
identifier[playlist_song] ,
identifier[preceding_entry_id] = identifier[prev] . identifier[get] ( literal[string] ),
identifier[following_entry_id] = identifier[next_] . identifier[get] ( literal[string] )
)
identifier[response] = identifier[self] . identifier[_call] ( identifier[mc_calls] . identifier[PlaylistEntriesBatch] , identifier[mutation] )
identifier[result] = identifier[response] . identifier[body] [ literal[string] ][ literal[int] ]
keyword[if] identifier[result] [ literal[string] ]!= literal[string] :
keyword[break]
keyword[if] identifier[i] < identifier[playlist_songs_len] - literal[int] :
keyword[while] keyword[True] :
identifier[prev] = identifier[self] . identifier[playlist_song] ( identifier[result] [ literal[string] ])
keyword[if] identifier[prev] :
keyword[break]
keyword[return] identifier[self] . identifier[playlist] ( identifier[playlist_songs] [ literal[int] ][ literal[string] ], identifier[include_songs] = keyword[True] ) | def playlist_songs_move(self, playlist_songs, *, after=None, before=None, index=None, position=None):
"""Move songs in a playlist.
Note:
* Provide no optional arguments to move to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to move to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
playlist_songs (list): A list of playlist song dicts.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``songs``.
position (int, Optional): The one-based position to insert ``songs``.
Returns:
dict: Playlist dict including songs.
"""
if not more_itertools.all_equal((playlist_song['playlistId'] for playlist_song in playlist_songs)):
raise ValueError("All 'playlist_songs' must be from the same playlist.") # depends on [control=['if'], data=[]]
playlist = self.playlist(playlist_songs[0]['playlistId'], include_songs=True)
(prev, next_) = get_ple_prev_next(playlist['tracks'], after=after, before=before, index=index, position=position)
playlist_songs_len = len(playlist_songs)
for (i, playlist_song) in enumerate(playlist_songs):
mutation = mc_calls.PlaylistEntriesBatch.update(playlist_song, preceding_entry_id=prev.get('id'), following_entry_id=next_.get('id'))
response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
result = response.body['mutate_response'][0] # TODO: Proper exception on failure.
if result['response_code'] != 'OK':
break # depends on [control=['if'], data=[]]
if i < playlist_songs_len - 1:
while True:
prev = self.playlist_song(result['id'])
if prev:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return self.playlist(playlist_songs[0]['playlistId'], include_songs=True) |
def list():
    """List running TensorBoard instances.

    Returns:
      A pandas DataFrame with one row per running ``tensorboard`` process,
      holding its ``pid`` and the ``--logdir``/``--port`` it was started with.
    """
    # NOTE: the function name shadows the builtin `list`; kept for API
    # compatibility with existing callers.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--logdir')
    arg_parser.add_argument('--port')

    instances = []
    for proc in psutil.process_iter():
        if proc.name() != 'tensorboard':
            continue
        if proc.status() == psutil.STATUS_ZOMBIE:
            continue
        argv = proc.cmdline()[2:]  # drop 'python' and the 'tensorboard' script
        parsed = arg_parser.parse_args(argv)
        instances.append({'pid': proc.pid, 'logdir': parsed.logdir, 'port': parsed.port})
    return pd.DataFrame(instances)
constant[List running TensorBoard instances.]
variable[running_list] assign[=] list[[]]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[--logdir]]]
call[name[parser].add_argument, parameter[constant[--port]]]
for taget[name[p]] in starred[call[name[psutil].process_iter, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b1123490> begin[:]
continue
variable[cmd_args] assign[=] call[name[p].cmdline, parameter[]]
<ast.Delete object at 0x7da1b1122e30>
variable[args] assign[=] call[name[parser].parse_args, parameter[name[cmd_args]]]
call[name[running_list].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1123550>, <ast.Constant object at 0x7da1b1122140>, <ast.Constant object at 0x7da1b1121870>], [<ast.Attribute object at 0x7da1b11229e0>, <ast.Attribute object at 0x7da1b1121ff0>, <ast.Attribute object at 0x7da1b1120c10>]]]]
return[call[name[pd].DataFrame, parameter[name[running_list]]]] | keyword[def] identifier[list] ():
literal[string]
identifier[running_list] =[]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[parser] . identifier[add_argument] ( literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] )
keyword[for] identifier[p] keyword[in] identifier[psutil] . identifier[process_iter] ():
keyword[if] identifier[p] . identifier[name] ()!= literal[string] keyword[or] identifier[p] . identifier[status] ()== identifier[psutil] . identifier[STATUS_ZOMBIE] :
keyword[continue]
identifier[cmd_args] = identifier[p] . identifier[cmdline] ()
keyword[del] identifier[cmd_args] [ literal[int] : literal[int] ]
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[cmd_args] )
identifier[running_list] . identifier[append] ({ literal[string] : identifier[p] . identifier[pid] , literal[string] : identifier[args] . identifier[logdir] , literal[string] : identifier[args] . identifier[port] })
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[running_list] ) | def list():
"""List running TensorBoard instances."""
running_list = []
parser = argparse.ArgumentParser()
parser.add_argument('--logdir')
parser.add_argument('--port')
for p in psutil.process_iter():
if p.name() != 'tensorboard' or p.status() == psutil.STATUS_ZOMBIE:
continue # depends on [control=['if'], data=[]]
cmd_args = p.cmdline()
del cmd_args[0:2] # remove 'python' and 'tensorboard'
args = parser.parse_args(cmd_args)
running_list.append({'pid': p.pid, 'logdir': args.logdir, 'port': args.port}) # depends on [control=['for'], data=['p']]
return pd.DataFrame(running_list) |
def get_changed_vars(section: SoS_Step):
    '''Return the set of variable names a step declares as "shared"
    (and therefore "provides") to other steps.'''
    if 'shared' not in section.options:
        return set()

    svars = section.options['shared']
    changed = set()

    # str is itself a Sequence, so it must be tested first.
    if isinstance(svars, str):
        # A single bare variable name.
        changed.add(svars)
    elif isinstance(svars, Mapping):
        # {name: expression} -- the keys are the shared names.
        changed.update(svars.keys())
    elif isinstance(svars, Sequence):
        # A list mixing bare names and {name: expression} mappings.
        for entry in svars:
            if isinstance(entry, str):
                changed.add(entry)
            elif isinstance(entry, Mapping):
                changed.update(entry.keys())
            else:
                raise ValueError(
                    f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'
                )
    else:
        raise ValueError(
            f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'
        )

    return changed
constant[changed vars are variables that are "shared" and therefore "provides"
to others ]
if compare[constant[shared] <ast.NotIn object at 0x7da2590d7190> name[section].options] begin[:]
return[call[name[set], parameter[]]]
variable[changed_vars] assign[=] call[name[set], parameter[]]
variable[svars] assign[=] call[name[section].options][constant[shared]]
if call[name[isinstance], parameter[name[svars], name[str]]] begin[:]
call[name[changed_vars].add, parameter[name[svars]]]
variable[svars] assign[=] dictionary[[<ast.Name object at 0x7da1b12b58d0>], [<ast.Name object at 0x7da1b12b5900>]]
return[name[changed_vars]] | keyword[def] identifier[get_changed_vars] ( identifier[section] : identifier[SoS_Step] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[section] . identifier[options] :
keyword[return] identifier[set] ()
identifier[changed_vars] = identifier[set] ()
identifier[svars] = identifier[section] . identifier[options] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[svars] , identifier[str] ):
identifier[changed_vars] . identifier[add] ( identifier[svars] )
identifier[svars] ={ identifier[svars] : identifier[svars] }
keyword[elif] identifier[isinstance] ( identifier[svars] , identifier[Sequence] ):
keyword[for] identifier[item] keyword[in] identifier[svars] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[str] ):
identifier[changed_vars] . identifier[add] ( identifier[item] )
keyword[elif] identifier[isinstance] ( identifier[item] , identifier[Mapping] ):
identifier[changed_vars] |= identifier[set] ( identifier[item] . identifier[keys] ())
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[elif] identifier[isinstance] ( identifier[svars] , identifier[Mapping] ):
identifier[changed_vars] |= identifier[set] ( identifier[svars] . identifier[keys] ())
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[return] identifier[changed_vars] | def get_changed_vars(section: SoS_Step):
"""changed vars are variables that are "shared" and therefore "provides"
to others """
if 'shared' not in section.options:
return set() # depends on [control=['if'], data=[]]
changed_vars = set()
svars = section.options['shared']
if isinstance(svars, str):
changed_vars.add(svars)
svars = {svars: svars} # depends on [control=['if'], data=[]]
elif isinstance(svars, Sequence):
for item in svars:
if isinstance(item, str):
changed_vars.add(item) # depends on [control=['if'], data=[]]
elif isinstance(item, Mapping):
changed_vars |= set(item.keys()) # depends on [control=['if'], data=[]]
else:
raise ValueError(f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided') # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
elif isinstance(svars, Mapping):
changed_vars |= set(svars.keys()) # depends on [control=['if'], data=[]]
else:
raise ValueError(f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided')
return changed_vars |
def get_gcps(self):
    """Read ground control points (GCPs) from the GDAL band.

    Builds regular lon/lat/alt grids from the file handle's GCP list by
    sorting the unique row and column indices and reshaping the GCP
    coordinates accordingly.

    Returns:
        (xpoints, ypoints): Unique pixel (column) and line (row) indices.
        (gcp_lons, gcp_lats, gcp_alts): 2d coordinate arrays of shape
            (len(ypoints), len(xpoints)).
        gcps: The raw GCP structure from the file handle, passed through.
    """
    gcps = self.filehandle.gcps
    # One record per GCP: (row, col, lon, lat, alt).
    records = np.array([(pt.row, pt.col, pt.x, pt.y, pt.z) for pt in gcps[0]])

    rows = np.unique(records[:, 0])
    cols = np.unique(records[:, 1])
    grid_shape = (rows.shape[0], cols.shape[0])

    lons = records[:, 2].reshape(grid_shape)
    lats = records[:, 3].reshape(grid_shape)
    alts = records[:, 4].reshape(grid_shape)

    return (cols, rows), (lons, lats, alts), gcps
constant[Read GCP from the GDAL band.
Args:
band (gdal band): Measurement band which comes with GCP's
coordinates (tuple): A tuple with longitude and latitude arrays
Returns:
points (tuple): Pixel and Line indices 1d arrays
gcp_coords (tuple): longitude and latitude 1d arrays
]
variable[gcps] assign[=] name[self].filehandle.gcps
variable[gcp_array] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b2254c10>]]
variable[ypoints] assign[=] call[name[np].unique, parameter[call[name[gcp_array]][tuple[[<ast.Slice object at 0x7da1b2256110>, <ast.Constant object at 0x7da1b22549d0>]]]]]
variable[xpoints] assign[=] call[name[np].unique, parameter[call[name[gcp_array]][tuple[[<ast.Slice object at 0x7da1b2257b20>, <ast.Constant object at 0x7da1b2257490>]]]]]
variable[gcp_lons] assign[=] call[call[name[gcp_array]][tuple[[<ast.Slice object at 0x7da1b2255db0>, <ast.Constant object at 0x7da1b2256ef0>]]].reshape, parameter[call[name[ypoints].shape][constant[0]], call[name[xpoints].shape][constant[0]]]]
variable[gcp_lats] assign[=] call[call[name[gcp_array]][tuple[[<ast.Slice object at 0x7da1b2254520>, <ast.Constant object at 0x7da1b2256350>]]].reshape, parameter[call[name[ypoints].shape][constant[0]], call[name[xpoints].shape][constant[0]]]]
variable[gcp_alts] assign[=] call[call[name[gcp_array]][tuple[[<ast.Slice object at 0x7da1b2254be0>, <ast.Constant object at 0x7da1b2257370>]]].reshape, parameter[call[name[ypoints].shape][constant[0]], call[name[xpoints].shape][constant[0]]]]
return[tuple[[<ast.Tuple object at 0x7da1b22a4940>, <ast.Tuple object at 0x7da1b22a48b0>, <ast.Name object at 0x7da1b22a4100>]]] | keyword[def] identifier[get_gcps] ( identifier[self] ):
literal[string]
identifier[gcps] = identifier[self] . identifier[filehandle] . identifier[gcps]
identifier[gcp_array] = identifier[np] . identifier[array] ([( identifier[p] . identifier[row] , identifier[p] . identifier[col] , identifier[p] . identifier[x] , identifier[p] . identifier[y] , identifier[p] . identifier[z] ) keyword[for] identifier[p] keyword[in] identifier[gcps] [ literal[int] ]])
identifier[ypoints] = identifier[np] . identifier[unique] ( identifier[gcp_array] [:, literal[int] ])
identifier[xpoints] = identifier[np] . identifier[unique] ( identifier[gcp_array] [:, literal[int] ])
identifier[gcp_lons] = identifier[gcp_array] [:, literal[int] ]. identifier[reshape] ( identifier[ypoints] . identifier[shape] [ literal[int] ], identifier[xpoints] . identifier[shape] [ literal[int] ])
identifier[gcp_lats] = identifier[gcp_array] [:, literal[int] ]. identifier[reshape] ( identifier[ypoints] . identifier[shape] [ literal[int] ], identifier[xpoints] . identifier[shape] [ literal[int] ])
identifier[gcp_alts] = identifier[gcp_array] [:, literal[int] ]. identifier[reshape] ( identifier[ypoints] . identifier[shape] [ literal[int] ], identifier[xpoints] . identifier[shape] [ literal[int] ])
keyword[return] ( identifier[xpoints] , identifier[ypoints] ),( identifier[gcp_lons] , identifier[gcp_lats] , identifier[gcp_alts] ), identifier[gcps] | def get_gcps(self):
"""Read GCP from the GDAL band.
Args:
band (gdal band): Measurement band which comes with GCP's
coordinates (tuple): A tuple with longitude and latitude arrays
Returns:
points (tuple): Pixel and Line indices 1d arrays
gcp_coords (tuple): longitude and latitude 1d arrays
"""
gcps = self.filehandle.gcps
gcp_array = np.array([(p.row, p.col, p.x, p.y, p.z) for p in gcps[0]])
ypoints = np.unique(gcp_array[:, 0])
xpoints = np.unique(gcp_array[:, 1])
gcp_lons = gcp_array[:, 2].reshape(ypoints.shape[0], xpoints.shape[0])
gcp_lats = gcp_array[:, 3].reshape(ypoints.shape[0], xpoints.shape[0])
gcp_alts = gcp_array[:, 4].reshape(ypoints.shape[0], xpoints.shape[0])
return ((xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), gcps) |
def on_evaluate_request(self, py_db, request):
    '''Handle a debug-adapter 'evaluate' request.

    Resolves the thread that owns the request's frameId and delegates the
    actual evaluation to the API layer.

    :param EvaluateRequest request:
    '''
    # : :type arguments: EvaluateArguments
    frame_id = request.arguments.frameId
    owning_thread = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(
        frame_id)
    self.api.request_exec_or_evaluate_json(py_db, request, owning_thread)
constant[
:param EvaluateRequest request:
]
variable[arguments] assign[=] name[request].arguments
variable[thread_id] assign[=] call[name[py_db].suspended_frames_manager.get_thread_id_for_variable_reference, parameter[name[arguments].frameId]]
call[name[self].api.request_exec_or_evaluate_json, parameter[name[py_db], name[request], name[thread_id]]] | keyword[def] identifier[on_evaluate_request] ( identifier[self] , identifier[py_db] , identifier[request] ):
literal[string]
identifier[arguments] = identifier[request] . identifier[arguments]
identifier[thread_id] = identifier[py_db] . identifier[suspended_frames_manager] . identifier[get_thread_id_for_variable_reference] (
identifier[arguments] . identifier[frameId] )
identifier[self] . identifier[api] . identifier[request_exec_or_evaluate_json] (
identifier[py_db] , identifier[request] , identifier[thread_id] ) | def on_evaluate_request(self, py_db, request):
"""
:param EvaluateRequest request:
"""
# : :type arguments: EvaluateArguments
arguments = request.arguments
thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(arguments.frameId)
self.api.request_exec_or_evaluate_json(py_db, request, thread_id) |
def send_terrain_data(self):
    '''Send one pending terrain data bit, or clear the request when done.

    Scans the 56-bit request mask for the first bit that was requested but
    not yet sent; sends it and returns. If every requested bit has been
    sent, the current request and sent mask are reset.
    '''
    request = self.current_request
    for bit in range(56):
        wanted = request.mask & (1 << bit)
        already_sent = self.sent_mask & (1 << bit)
        if wanted and not already_sent:
            self.send_terrain_data_bit(bit)
            return
    # No bits left to send -- clear the request state.
    self.current_request = None
    self.sent_mask = 0
constant[send some terrain data]
for taget[name[bit]] in starred[call[name[range], parameter[constant[56]]]] begin[:]
if <ast.BoolOp object at 0x7da1b16bc610> begin[:]
call[name[self].send_terrain_data_bit, parameter[name[bit]]]
return[None]
name[self].current_request assign[=] constant[None]
name[self].sent_mask assign[=] constant[0] | keyword[def] identifier[send_terrain_data] ( identifier[self] ):
literal[string]
keyword[for] identifier[bit] keyword[in] identifier[range] ( literal[int] ):
keyword[if] identifier[self] . identifier[current_request] . identifier[mask] &( literal[int] << identifier[bit] ) keyword[and] identifier[self] . identifier[sent_mask] &( literal[int] << identifier[bit] )== literal[int] :
identifier[self] . identifier[send_terrain_data_bit] ( identifier[bit] )
keyword[return]
identifier[self] . identifier[current_request] = keyword[None]
identifier[self] . identifier[sent_mask] = literal[int] | def send_terrain_data(self):
"""send some terrain data"""
for bit in range(56):
if self.current_request.mask & 1 << bit and self.sent_mask & 1 << bit == 0:
self.send_terrain_data_bit(bit)
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['bit']]
# no bits to send
self.current_request = None
self.sent_mask = 0 |
def t_bin_NUMBER(t):
    r'[01]+' # A binary integer
    # NOTE: the raw-string literal above doubles as this lexer token rule's
    # regular expression (PLY-style: the docstring is the match pattern), so
    # it must remain the first statement and stay exactly as written.
    # Convert the matched run of binary digits to its integer value.
    t.value = int(t.value, 2)
    # The number is complete: leave the 'bin' state, back to the default state.
    t.lexer.begin('INITIAL')
    return t
constant[[01]+]
name[t].value assign[=] call[name[int], parameter[name[t].value, constant[2]]]
call[name[t].lexer.begin, parameter[constant[INITIAL]]]
return[name[t]] | keyword[def] identifier[t_bin_NUMBER] ( identifier[t] ):
literal[string]
identifier[t] . identifier[value] = identifier[int] ( identifier[t] . identifier[value] , literal[int] )
identifier[t] . identifier[lexer] . identifier[begin] ( literal[string] )
keyword[return] identifier[t] | def t_bin_NUMBER(t):
"""[01]+""" # A binary integer
t.value = int(t.value, 2)
t.lexer.begin('INITIAL')
return t |
def genCompleteTypes( compoundSig ):
    """
    Generator function used to iterate over each complete,
    top-level type contained in a signature. Ex::

       "iii"       => [ 'i', 'i', 'i' ]
       "i(ii)i"    => [ 'i', '(ii)', 'i' ]
       "i(i(ii))i" => [ 'i', '(i(ii))', 'i' ]
    """
    end = len(compoundSig)

    def find_end( idx, b, e ):
        # Return the index of the closer `e` matching the opener `b` that
        # precedes `idx`, honoring nesting. Falls through (returns None) on
        # an unbalanced signature, which surfaces as a TypeError in the
        # caller's `x + 1` slice arithmetic.
        depth = 1
        while idx < end:
            subc = compoundSig[idx]
            if subc == b:
                depth += 1
            elif subc == e:
                depth -= 1
                if depth == 0:
                    return idx
            idx += 1

    i = 0
    while i < end:
        c = compoundSig[i]

        if c == '(':
            # Struct: yield everything through the matching ')'.
            x = find_end(i + 1, '(', ')')
            yield compoundSig[i:x + 1]
            i = x

        elif c == '{':
            # Dict entry: yield everything through the matching '}'.
            x = find_end(i + 1, '{', '}')
            yield compoundSig[i:x + 1]
            i = x

        elif c == 'a':
            # Array: 'a' is followed by exactly one complete element type;
            # recurse to extract it (handles nested arrays like "aai").
            ct = next(genCompleteTypes(compoundSig[i + 1:]))
            i += len(ct)
            yield 'a' + ct

        else:
            # Any other character is a basic, single-character type code.
            yield c

        i += 1
constant[
Generator function used to iterate over each complete,
top-level type contained in in a signature. Ex::
"iii" => [ 'i', 'i', 'i' ]
"i(ii)i" => [ 'i', '(ii)', 'i' ]
"i(i(ii))i" => [ 'i', '(i(ii))', 'i' ]
]
variable[i] assign[=] constant[0]
variable[start] assign[=] constant[0]
variable[end] assign[=] call[name[len], parameter[name[compoundSig]]]
def function[find_end, parameter[idx, b, e]]:
variable[depth] assign[=] constant[1]
while compare[name[idx] less[<] name[end]] begin[:]
variable[subc] assign[=] call[name[compoundSig]][name[idx]]
if compare[name[subc] equal[==] name[b]] begin[:]
<ast.AugAssign object at 0x7da1b033a740>
<ast.AugAssign object at 0x7da1b03395a0>
while compare[name[i] less[<] name[end]] begin[:]
variable[c] assign[=] call[name[compoundSig]][name[i]]
if compare[name[c] equal[==] constant[(]] begin[:]
variable[x] assign[=] call[name[find_end], parameter[binary_operation[name[i] + constant[1]], constant[(], constant[)]]]
<ast.Yield object at 0x7da1b033bf70>
variable[i] assign[=] name[x]
<ast.AugAssign object at 0x7da1b02e4e50> | keyword[def] identifier[genCompleteTypes] ( identifier[compoundSig] ):
literal[string]
identifier[i] = literal[int]
identifier[start] = literal[int]
identifier[end] = identifier[len] ( identifier[compoundSig] )
keyword[def] identifier[find_end] ( identifier[idx] , identifier[b] , identifier[e] ):
identifier[depth] = literal[int]
keyword[while] identifier[idx] < identifier[end] :
identifier[subc] = identifier[compoundSig] [ identifier[idx] ]
keyword[if] identifier[subc] == identifier[b] :
identifier[depth] += literal[int]
keyword[elif] identifier[subc] == identifier[e] :
identifier[depth] -= literal[int]
keyword[if] identifier[depth] == literal[int] :
keyword[return] identifier[idx]
identifier[idx] += literal[int]
keyword[while] identifier[i] < identifier[end] :
identifier[c] = identifier[compoundSig] [ identifier[i] ]
keyword[if] identifier[c] == literal[string] :
identifier[x] = identifier[find_end] ( identifier[i] + literal[int] , literal[string] , literal[string] )
keyword[yield] identifier[compoundSig] [ identifier[i] : identifier[x] + literal[int] ]
identifier[i] = identifier[x]
keyword[elif] identifier[c] == literal[string] :
identifier[x] = identifier[find_end] ( identifier[i] + literal[int] , literal[string] , literal[string] )
keyword[yield] identifier[compoundSig] [ identifier[i] : identifier[x] + literal[int] ]
identifier[i] = identifier[x]
keyword[elif] identifier[c] == literal[string] :
identifier[start] = identifier[i]
identifier[g] = identifier[genCompleteTypes] ( identifier[compoundSig] [ identifier[i] + literal[int] :])
identifier[ct] = identifier[six] . identifier[next] ( identifier[g] )
identifier[i] += identifier[len] ( identifier[ct] )
keyword[yield] literal[string] + identifier[ct]
keyword[else] :
keyword[yield] identifier[c]
identifier[i] += literal[int] | def genCompleteTypes(compoundSig):
"""
Generator function used to iterate over each complete,
top-level type contained in in a signature. Ex::
"iii" => [ 'i', 'i', 'i' ]
"i(ii)i" => [ 'i', '(ii)', 'i' ]
"i(i(ii))i" => [ 'i', '(i(ii))', 'i' ]
"""
i = 0
start = 0
end = len(compoundSig)
def find_end(idx, b, e):
depth = 1
while idx < end:
subc = compoundSig[idx]
if subc == b:
depth += 1 # depends on [control=['if'], data=[]]
elif subc == e:
depth -= 1
if depth == 0:
return idx # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
idx += 1 # depends on [control=['while'], data=['idx']]
while i < end:
c = compoundSig[i]
if c == '(':
x = find_end(i + 1, '(', ')')
yield compoundSig[i:x + 1]
i = x # depends on [control=['if'], data=[]]
elif c == '{':
x = find_end(i + 1, '{', '}')
yield compoundSig[i:x + 1]
i = x # depends on [control=['if'], data=[]]
elif c == 'a':
start = i
g = genCompleteTypes(compoundSig[i + 1:])
ct = six.next(g)
i += len(ct)
yield ('a' + ct) # depends on [control=['if'], data=[]]
else:
yield c
i += 1 # depends on [control=['while'], data=['i']] |
def get_links(html, outformat):
    """Return a list of reference links from the html.

    Parameters
    ----------
    html : str
        A Google Scholar results page.
    outformat : int
        the output format of the citations (one of FORMAT_BIBTEX,
        FORMAT_ENDNOTE, FORMAT_REFMAN, FORMAT_WENXIANWANG)

    Returns
    -------
    List[str]
        the links to the references

    Raises
    ------
    ValueError
        If ``outformat`` is not one of the supported formats.
    """
    # Map each supported format to the file extension used in the
    # scholar.googleusercontent.com export links.  This replaces four
    # near-duplicate regexes (the BIBTEX one was inconsistently missing
    # the closing quote; since [^"]* stops at the quote anyway, the
    # captured link is unchanged).
    extensions = {
        FORMAT_BIBTEX: 'bib',
        FORMAT_ENDNOTE: 'enw',
        FORMAT_REFMAN: 'ris',
        FORMAT_WENXIANWANG: 'ral',
    }
    try:
        ext = extensions[outformat]
    except KeyError:
        # BUG FIX: an unknown format previously fell through and crashed
        # later with an unrelated NameError on `refre`; fail early with a
        # clear message instead.
        raise ValueError('unsupported citation output format: %r' % (outformat,))
    refre = re.compile(
        r'<a href="https://scholar.googleusercontent.com'
        r'(/scholar\.' + ext + r'\?[^"]*)"')
    reflist = refre.findall(html)
    # Unescape named HTML entities (e.g. &amp;) in the extracted links.
    entity_re = '&(%s);' % '|'.join(name2codepoint)
    return [re.sub(entity_re, lambda m: chr(name2codepoint[m.group(1)]), s)
            for s in reflist]
constant[Return a list of reference links from the html.
Parameters
----------
html : str
outformat : int
the output format of the citations
Returns
-------
List[str]
the links to the references
]
if compare[name[outformat] equal[==] name[FORMAT_BIBTEX]] begin[:]
variable[refre] assign[=] call[name[re].compile, parameter[constant[<a href="https://scholar.googleusercontent.com(/scholar\.bib\?[^"]*)]]]
variable[reflist] assign[=] call[name[refre].findall, parameter[name[html]]]
variable[reflist] assign[=] <ast.ListComp object at 0x7da1b113a0e0>
return[name[reflist]] | keyword[def] identifier[get_links] ( identifier[html] , identifier[outformat] ):
literal[string]
keyword[if] identifier[outformat] == identifier[FORMAT_BIBTEX] :
identifier[refre] = identifier[re] . identifier[compile] ( literal[string] )
keyword[elif] identifier[outformat] == identifier[FORMAT_ENDNOTE] :
identifier[refre] = identifier[re] . identifier[compile] ( literal[string] )
keyword[elif] identifier[outformat] == identifier[FORMAT_REFMAN] :
identifier[refre] = identifier[re] . identifier[compile] ( literal[string] )
keyword[elif] identifier[outformat] == identifier[FORMAT_WENXIANWANG] :
identifier[refre] = identifier[re] . identifier[compile] ( literal[string] )
identifier[reflist] = identifier[refre] . identifier[findall] ( identifier[html] )
identifier[reflist] =[ identifier[re] . identifier[sub] ( literal[string] % literal[string] . identifier[join] ( identifier[name2codepoint] ), keyword[lambda] identifier[m] :
identifier[chr] ( identifier[name2codepoint] [ identifier[m] . identifier[group] ( literal[int] )]), identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[reflist] ]
keyword[return] identifier[reflist] | def get_links(html, outformat):
"""Return a list of reference links from the html.
Parameters
----------
html : str
outformat : int
the output format of the citations
Returns
-------
List[str]
the links to the references
"""
if outformat == FORMAT_BIBTEX:
refre = re.compile('<a href="https://scholar.googleusercontent.com(/scholar\\.bib\\?[^"]*)') # depends on [control=['if'], data=[]]
elif outformat == FORMAT_ENDNOTE:
refre = re.compile('<a href="https://scholar.googleusercontent.com(/scholar\\.enw\\?[^"]*)"') # depends on [control=['if'], data=[]]
elif outformat == FORMAT_REFMAN:
refre = re.compile('<a href="https://scholar.googleusercontent.com(/scholar\\.ris\\?[^"]*)"') # depends on [control=['if'], data=[]]
elif outformat == FORMAT_WENXIANWANG:
refre = re.compile('<a href="https://scholar.googleusercontent.com(/scholar\\.ral\\?[^"]*)"') # depends on [control=['if'], data=[]]
reflist = refre.findall(html)
# escape html entities
reflist = [re.sub('&(%s);' % '|'.join(name2codepoint), lambda m: chr(name2codepoint[m.group(1)]), s) for s in reflist]
return reflist |
def list_connection_channels(self, name):
    """Return all channels belonging to the named connection.

    :param name: The connection name
    :type name: str
    """
    # URL-encode the connection name; RabbitMQ connection names routinely
    # contain spaces and slashes.
    encoded_name = urllib.parse.quote_plus(name)
    path = '/api/connections/{0}/channels'.format(encoded_name)
    return self._api_get(path)
constant[
List of all channels for a given connection.
:param name: The connection name
:type name: str
]
return[call[name[self]._api_get, parameter[call[constant[/api/connections/{0}/channels].format, parameter[call[name[urllib].parse.quote_plus, parameter[name[name]]]]]]]] | keyword[def] identifier[list_connection_channels] ( identifier[self] , identifier[name] ):
literal[string]
keyword[return] identifier[self] . identifier[_api_get] ( literal[string] . identifier[format] (
identifier[urllib] . identifier[parse] . identifier[quote_plus] ( identifier[name] )
)) | def list_connection_channels(self, name):
"""
List of all channels for a given connection.
:param name: The connection name
:type name: str
"""
return self._api_get('/api/connections/{0}/channels'.format(urllib.parse.quote_plus(name))) |
def get_version(self):
    """Get the DCNM version.

    Returns the 'Dcnm-Version' field reported by the DCNM REST API, or
    None when the response is missing, unsuccessful, or lacks that field.
    Exits the process if the request to DCNM fails outright.
    """
    url = '%s://%s/rest/dcnm-version' % (self.dcnm_protocol, self._ip)
    payload = {}
    try:
        res = self._send_request('GET', url, payload, 'dcnm-version')
        if res and res.status_code in self._resp_ok:
            return res.json().get('Dcnm-Version')
    except dexc.DfaClientRequestFailed as exc:
        LOG.error("Failed to get DCNM version.")
        # BUG FIX: sys.exit() accepts at most one argument; the previous
        # sys.exit(msg, exc) call raised TypeError instead of exiting.
        # Interpolate the exception into the message instead.
        sys.exit("ERROR: Failed to connect to DCNM: %s" % exc)
constant[Get the DCNM version.]
variable[url] assign[=] binary_operation[constant[%s://%s/rest/dcnm-version] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1a5c460>, <ast.Attribute object at 0x7da1b1a5e050>]]]
variable[payload] assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b1a5fdf0> | keyword[def] identifier[get_version] ( identifier[self] ):
literal[string]
identifier[url] = literal[string] %( identifier[self] . identifier[dcnm_protocol] , identifier[self] . identifier[_ip] )
identifier[payload] ={}
keyword[try] :
identifier[res] = identifier[self] . identifier[_send_request] ( literal[string] , identifier[url] , identifier[payload] , literal[string] )
keyword[if] identifier[res] keyword[and] identifier[res] . identifier[status_code] keyword[in] identifier[self] . identifier[_resp_ok] :
keyword[return] identifier[res] . identifier[json] (). identifier[get] ( literal[string] )
keyword[except] identifier[dexc] . identifier[DfaClientRequestFailed] keyword[as] identifier[exc] :
identifier[LOG] . identifier[error] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[string] , identifier[exc] ) | def get_version(self):
"""Get the DCNM version."""
url = '%s://%s/rest/dcnm-version' % (self.dcnm_protocol, self._ip)
payload = {}
try:
res = self._send_request('GET', url, payload, 'dcnm-version')
if res and res.status_code in self._resp_ok:
return res.json().get('Dcnm-Version') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except dexc.DfaClientRequestFailed as exc:
LOG.error('Failed to get DCNM version.')
sys.exit('ERROR: Failed to connect to DCNM: %s', exc) # depends on [control=['except'], data=['exc']] |
def touch(filepath, times=None, mkdir=False):
    """Create *filepath* if missing and update its access/modify timestamps.

    >>> from pugnlp.constants import DATA_PATH
    >>> filepath = os.path.join(DATA_PATH, 'tmpfilefortouch.txt')
    >>> touch(filepath).endswith('tmpfilefortouch.txt')
    True
    >>> os.path.isfile(filepath)
    True
    >>> os.remove(filepath)
    """
    path = expand_path(filepath)
    if mkdir:
        mkdir_p(os.path.dirname(path))
    # Append mode creates the file without truncating an existing one.
    with open(path, 'a'):
        # times=None means "set timestamps to now"; any truthy value is
        # passed through, while a falsy non-None value (e.g. False) skips
        # the utime call entirely.
        if times is None or times:
            os.utime(path, times)
    return path
constant[ Update the modify (modify) and change (ctime) timestamps of a file, create if necessary
>>> from pugnlp.constants import DATA_PATH
>>> filepath = os.path.join(DATA_PATH, 'tmpfilefortouch.txt')
>>> touch(filepath).endswith('tmpfilefortouch.txt')
True
>>> os.path.isfile(filepath)
True
>>> os.remove(filepath)
]
variable[filepath] assign[=] call[name[expand_path], parameter[name[filepath]]]
if name[mkdir] begin[:]
call[name[mkdir_p], parameter[call[name[os].path.dirname, parameter[name[filepath]]]]]
with call[name[open], parameter[name[filepath], constant[a]]] begin[:]
if <ast.BoolOp object at 0x7da18fe90790> begin[:]
call[name[os].utime, parameter[name[filepath], name[times]]]
return[name[filepath]] | keyword[def] identifier[touch] ( identifier[filepath] , identifier[times] = keyword[None] , identifier[mkdir] = keyword[False] ):
literal[string]
identifier[filepath] = identifier[expand_path] ( identifier[filepath] )
keyword[if] identifier[mkdir] :
identifier[mkdir_p] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[filepath] ))
keyword[with] identifier[open] ( identifier[filepath] , literal[string] ):
keyword[if] identifier[times] keyword[or] identifier[times] keyword[is] keyword[None] :
identifier[os] . identifier[utime] ( identifier[filepath] , identifier[times] )
keyword[return] identifier[filepath] | def touch(filepath, times=None, mkdir=False):
""" Update the modify (modify) and change (ctime) timestamps of a file, create if necessary
>>> from pugnlp.constants import DATA_PATH
>>> filepath = os.path.join(DATA_PATH, 'tmpfilefortouch.txt')
>>> touch(filepath).endswith('tmpfilefortouch.txt')
True
>>> os.path.isfile(filepath)
True
>>> os.remove(filepath)
"""
filepath = expand_path(filepath)
if mkdir:
mkdir_p(os.path.dirname(filepath)) # depends on [control=['if'], data=[]]
with open(filepath, 'a'):
if times or times is None:
os.utime(filepath, times) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
return filepath |
def clean(self, exclude, max_count=None):
    """
    Delete all objects except for those in the exclude
    :param exclude: a list of blob_hashes to skip.
    :param max_count: maximum number of object to delete
    :return: generator of objects that failed to delete
    """
    # Set gives O(1) membership tests instead of scanning the list per object.
    excluded = set(exclude)
    names = (
        obj.object_name
        for obj in self.client.list_objects(self.bucket, self.remote_path + '/')
        # BUG FIX: compare the object *name* (the blob hash) against the
        # exclude list.  The original tested `obj not in exclude`, i.e. the
        # listing object itself, which never matches a hash string -- so the
        # exclusion was silently ineffective and excluded blobs were deleted.
        if obj.object_name not in excluded
    )
    return self.client.remove_objects(self.bucket,
                                      itertools.islice(names, max_count))
constant[
Delete all objects except for those in the exclude
:param exclude: a list of blob_hashes to skip.
:param max_count: maximum number of object to delete
:return: generator of objects that failed to delete
]
return[call[name[self].client.remove_objects, parameter[name[self].bucket, call[name[itertools].islice, parameter[<ast.GeneratorExp object at 0x7da1b12dae60>, name[max_count]]]]]] | keyword[def] identifier[clean] ( identifier[self] , identifier[exclude] , identifier[max_count] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[client] . identifier[remove_objects] ( identifier[self] . identifier[bucket] , identifier[itertools] . identifier[islice] (
( identifier[x] . identifier[object_name] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[client] . identifier[list_objects] ( identifier[self] . identifier[bucket] , identifier[self] . identifier[remote_path] + literal[string] )
keyword[if] identifier[x] keyword[not] keyword[in] identifier[exclude] ), identifier[max_count] )) | def clean(self, exclude, max_count=None):
"""
Delete all objects except for those in the exclude
:param exclude: a list of blob_hashes to skip.
:param max_count: maximum number of object to delete
:return: generator of objects that failed to delete
"""
return self.client.remove_objects(self.bucket, itertools.islice((x.object_name for x in self.client.list_objects(self.bucket, self.remote_path + '/') if x not in exclude), max_count)) |
def read(self, file, nbytes):
    """Read nbytes characters from file while running Tk mainloop.

    :param file: an integer file descriptor, or any object exposing a
        ``fileno()`` method (file object, socket).
    :param nbytes: maximum number of characters to read.
    :returns: the data read, as a string; may be shorter than ``nbytes``
        if EOF is reached first.
    :raises RuntimeError: if no graphics capability is available.
    :raises TypeError: if ``file`` is neither an int nor filehandle-like.
    """
    if not capable.OF_GRAPHICS:
        raise RuntimeError("Cannot run this command without graphics")
    if isinstance(file, int):
        fd = file
    else:
        # Otherwise, assume we have Python file object
        try:
            fd = file.fileno()
        except:
            raise TypeError("file must be an integer or a filehandle/socket")
    init_tk_default_root() # harmless if already done
    self.widget = TKNTR._default_root
    if not self.widget:
        # no Tk widgets yet, so no need for mainloop
        # (shouldnt happen now with init_tk_default_root)
        s = []
        while nbytes>0:
            snew = os.read(fd, nbytes) # returns bytes in PY3K
            if snew:
                if PY3K: snew = snew.decode('ascii','replace')
                s.append(snew)
                nbytes -= len(snew)
            else:
                # EOF -- just return what we have so far
                break
        return "".join(s)
    else:
        # Drive the read through Tk's event loop so the GUI stays
        # responsive: Tk invokes self._read whenever fd becomes readable
        # (or hits an exception condition).  NOTE(review): self._read
        # presumably appends chunks to self.value and terminates the
        # mainloop once self.nbytes is satisfied -- confirm in its body.
        self.nbytes = nbytes
        self.value = []
        self.widget.tk.createfilehandler(fd,
                                         TKNTR.READABLE | TKNTR.EXCEPTION,
                                         self._read)
        try:
            self.widget.mainloop()
        finally:
            # Always unregister the handler, even if mainloop raises,
            # so a stale callback never fires on a closed fd.
            self.widget.tk.deletefilehandler(fd)
        return "".join(self.value)
constant[Read nbytes characters from file while running Tk mainloop]
if <ast.UnaryOp object at 0x7da1b0e60730> begin[:]
<ast.Raise object at 0x7da1b0e63940>
if call[name[isinstance], parameter[name[file], name[int]]] begin[:]
variable[fd] assign[=] name[file]
call[name[init_tk_default_root], parameter[]]
name[self].widget assign[=] name[TKNTR]._default_root
if <ast.UnaryOp object at 0x7da1b0e629b0> begin[:]
variable[s] assign[=] list[[]]
while compare[name[nbytes] greater[>] constant[0]] begin[:]
variable[snew] assign[=] call[name[os].read, parameter[name[fd], name[nbytes]]]
if name[snew] begin[:]
if name[PY3K] begin[:]
variable[snew] assign[=] call[name[snew].decode, parameter[constant[ascii], constant[replace]]]
call[name[s].append, parameter[name[snew]]]
<ast.AugAssign object at 0x7da1b0e61750>
return[call[constant[].join, parameter[name[s]]]] | keyword[def] identifier[read] ( identifier[self] , identifier[file] , identifier[nbytes] ):
literal[string]
keyword[if] keyword[not] identifier[capable] . identifier[OF_GRAPHICS] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[file] , identifier[int] ):
identifier[fd] = identifier[file]
keyword[else] :
keyword[try] :
identifier[fd] = identifier[file] . identifier[fileno] ()
keyword[except] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[init_tk_default_root] ()
identifier[self] . identifier[widget] = identifier[TKNTR] . identifier[_default_root]
keyword[if] keyword[not] identifier[self] . identifier[widget] :
identifier[s] =[]
keyword[while] identifier[nbytes] > literal[int] :
identifier[snew] = identifier[os] . identifier[read] ( identifier[fd] , identifier[nbytes] )
keyword[if] identifier[snew] :
keyword[if] identifier[PY3K] : identifier[snew] = identifier[snew] . identifier[decode] ( literal[string] , literal[string] )
identifier[s] . identifier[append] ( identifier[snew] )
identifier[nbytes] -= identifier[len] ( identifier[snew] )
keyword[else] :
keyword[break]
keyword[return] literal[string] . identifier[join] ( identifier[s] )
keyword[else] :
identifier[self] . identifier[nbytes] = identifier[nbytes]
identifier[self] . identifier[value] =[]
identifier[self] . identifier[widget] . identifier[tk] . identifier[createfilehandler] ( identifier[fd] ,
identifier[TKNTR] . identifier[READABLE] | identifier[TKNTR] . identifier[EXCEPTION] ,
identifier[self] . identifier[_read] )
keyword[try] :
identifier[self] . identifier[widget] . identifier[mainloop] ()
keyword[finally] :
identifier[self] . identifier[widget] . identifier[tk] . identifier[deletefilehandler] ( identifier[fd] )
keyword[return] literal[string] . identifier[join] ( identifier[self] . identifier[value] ) | def read(self, file, nbytes):
"""Read nbytes characters from file while running Tk mainloop"""
if not capable.OF_GRAPHICS:
raise RuntimeError('Cannot run this command without graphics') # depends on [control=['if'], data=[]]
if isinstance(file, int):
fd = file # depends on [control=['if'], data=[]]
else:
# Otherwise, assume we have Python file object
try:
fd = file.fileno() # depends on [control=['try'], data=[]]
except:
raise TypeError('file must be an integer or a filehandle/socket') # depends on [control=['except'], data=[]]
init_tk_default_root() # harmless if already done
self.widget = TKNTR._default_root
if not self.widget:
# no Tk widgets yet, so no need for mainloop
# (shouldnt happen now with init_tk_default_root)
s = []
while nbytes > 0:
snew = os.read(fd, nbytes) # returns bytes in PY3K
if snew:
if PY3K:
snew = snew.decode('ascii', 'replace') # depends on [control=['if'], data=[]]
s.append(snew)
nbytes -= len(snew) # depends on [control=['if'], data=[]]
else:
# EOF -- just return what we have so far
break # depends on [control=['while'], data=['nbytes']]
return ''.join(s) # depends on [control=['if'], data=[]]
else:
self.nbytes = nbytes
self.value = []
self.widget.tk.createfilehandler(fd, TKNTR.READABLE | TKNTR.EXCEPTION, self._read)
try:
self.widget.mainloop() # depends on [control=['try'], data=[]]
finally:
self.widget.tk.deletefilehandler(fd)
return ''.join(self.value) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.