code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get_process_tiles(self, zoom=None):
"""
Yield process tiles.
Tiles intersecting with the input data bounding boxes as well as
process bounds, if provided, are considered process tiles. This is to
avoid iterating through empty tiles.
Parameters
----------
zoom : integer
zoom level process tiles should be returned from; if none is given,
return all process tiles
yields
------
BufferedTile objects
"""
if zoom or zoom == 0:
for tile in self.config.process_pyramid.tiles_from_geom(
self.config.area_at_zoom(zoom), zoom
):
yield tile
else:
for zoom in reversed(self.config.zoom_levels):
for tile in self.config.process_pyramid.tiles_from_geom(
self.config.area_at_zoom(zoom), zoom
):
yield tile | def function[get_process_tiles, parameter[self, zoom]]:
constant[
Yield process tiles.
Tiles intersecting with the input data bounding boxes as well as
process bounds, if provided, are considered process tiles. This is to
avoid iterating through empty tiles.
Parameters
----------
zoom : integer
zoom level process tiles should be returned from; if none is given,
return all process tiles
yields
------
BufferedTile objects
]
if <ast.BoolOp object at 0x7da1b013c6a0> begin[:]
for taget[name[tile]] in starred[call[name[self].config.process_pyramid.tiles_from_geom, parameter[call[name[self].config.area_at_zoom, parameter[name[zoom]]], name[zoom]]]] begin[:]
<ast.Yield object at 0x7da1b012e3e0> | keyword[def] identifier[get_process_tiles] ( identifier[self] , identifier[zoom] = keyword[None] ):
literal[string]
keyword[if] identifier[zoom] keyword[or] identifier[zoom] == literal[int] :
keyword[for] identifier[tile] keyword[in] identifier[self] . identifier[config] . identifier[process_pyramid] . identifier[tiles_from_geom] (
identifier[self] . identifier[config] . identifier[area_at_zoom] ( identifier[zoom] ), identifier[zoom]
):
keyword[yield] identifier[tile]
keyword[else] :
keyword[for] identifier[zoom] keyword[in] identifier[reversed] ( identifier[self] . identifier[config] . identifier[zoom_levels] ):
keyword[for] identifier[tile] keyword[in] identifier[self] . identifier[config] . identifier[process_pyramid] . identifier[tiles_from_geom] (
identifier[self] . identifier[config] . identifier[area_at_zoom] ( identifier[zoom] ), identifier[zoom]
):
keyword[yield] identifier[tile] | def get_process_tiles(self, zoom=None):
"""
Yield process tiles.
Tiles intersecting with the input data bounding boxes as well as
process bounds, if provided, are considered process tiles. This is to
avoid iterating through empty tiles.
Parameters
----------
zoom : integer
zoom level process tiles should be returned from; if none is given,
return all process tiles
yields
------
BufferedTile objects
"""
if zoom or zoom == 0:
for tile in self.config.process_pyramid.tiles_from_geom(self.config.area_at_zoom(zoom), zoom):
yield tile # depends on [control=['for'], data=['tile']] # depends on [control=['if'], data=[]]
else:
for zoom in reversed(self.config.zoom_levels):
for tile in self.config.process_pyramid.tiles_from_geom(self.config.area_at_zoom(zoom), zoom):
yield tile # depends on [control=['for'], data=['tile']] # depends on [control=['for'], data=['zoom']] |
def bundle_lambda(zipfile):
"""Write zipfile contents to file.
:param zipfile:
:return: exit_code
"""
# TODO have 'bundle.zip' as default config
if not zipfile:
return 1
with open('bundle.zip', 'wb') as zfile:
zfile.write(zipfile)
log.info('Finished - a bundle.zip is waiting for you...')
return 0 | def function[bundle_lambda, parameter[zipfile]]:
constant[Write zipfile contents to file.
:param zipfile:
:return: exit_code
]
if <ast.UnaryOp object at 0x7da207f02470> begin[:]
return[constant[1]]
with call[name[open], parameter[constant[bundle.zip], constant[wb]]] begin[:]
call[name[zfile].write, parameter[name[zipfile]]]
call[name[log].info, parameter[constant[Finished - a bundle.zip is waiting for you...]]]
return[constant[0]] | keyword[def] identifier[bundle_lambda] ( identifier[zipfile] ):
literal[string]
keyword[if] keyword[not] identifier[zipfile] :
keyword[return] literal[int]
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[zfile] :
identifier[zfile] . identifier[write] ( identifier[zipfile] )
identifier[log] . identifier[info] ( literal[string] )
keyword[return] literal[int] | def bundle_lambda(zipfile):
"""Write zipfile contents to file.
:param zipfile:
:return: exit_code
"""
# TODO have 'bundle.zip' as default config
if not zipfile:
return 1 # depends on [control=['if'], data=[]]
with open('bundle.zip', 'wb') as zfile:
zfile.write(zipfile) # depends on [control=['with'], data=['zfile']]
log.info('Finished - a bundle.zip is waiting for you...')
return 0 |
def display(self):
"""
Use a number of methods to guess if the default webbrowser will open in
the background as opposed to opening directly in the terminal.
"""
if self._display is None:
if sys.platform == 'darwin':
# OS X won't set $DISPLAY unless xQuartz is installed.
# If you're using OS X and you want to access a terminal
# browser, you need to set it manually via $BROWSER.
# See issue #166
display = True
else:
display = bool(os.environ.get("DISPLAY"))
# Use the convention defined here to parse $BROWSER
# https://docs.python.org/2/library/webbrowser.html
console_browsers = ['www-browser', 'links', 'links2', 'elinks',
'lynx', 'w3m']
if "BROWSER" in os.environ:
user_browser = os.environ["BROWSER"].split(os.pathsep)[0]
if user_browser in console_browsers:
display = False
if webbrowser._tryorder:
if webbrowser._tryorder[0] in console_browsers:
display = False
self._display = display
return self._display | def function[display, parameter[self]]:
constant[
Use a number of methods to guess if the default webbrowser will open in
the background as opposed to opening directly in the terminal.
]
if compare[name[self]._display is constant[None]] begin[:]
if compare[name[sys].platform equal[==] constant[darwin]] begin[:]
variable[display] assign[=] constant[True]
variable[console_browsers] assign[=] list[[<ast.Constant object at 0x7da2044c16f0>, <ast.Constant object at 0x7da2044c2020>, <ast.Constant object at 0x7da2044c3130>, <ast.Constant object at 0x7da2044c0d30>, <ast.Constant object at 0x7da2044c2b60>, <ast.Constant object at 0x7da2044c2d40>]]
if compare[constant[BROWSER] in name[os].environ] begin[:]
variable[user_browser] assign[=] call[call[call[name[os].environ][constant[BROWSER]].split, parameter[name[os].pathsep]]][constant[0]]
if compare[name[user_browser] in name[console_browsers]] begin[:]
variable[display] assign[=] constant[False]
if name[webbrowser]._tryorder begin[:]
if compare[call[name[webbrowser]._tryorder][constant[0]] in name[console_browsers]] begin[:]
variable[display] assign[=] constant[False]
name[self]._display assign[=] name[display]
return[name[self]._display] | keyword[def] identifier[display] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_display] keyword[is] keyword[None] :
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[display] = keyword[True]
keyword[else] :
identifier[display] = identifier[bool] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] ))
identifier[console_browsers] =[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ]
keyword[if] literal[string] keyword[in] identifier[os] . identifier[environ] :
identifier[user_browser] = identifier[os] . identifier[environ] [ literal[string] ]. identifier[split] ( identifier[os] . identifier[pathsep] )[ literal[int] ]
keyword[if] identifier[user_browser] keyword[in] identifier[console_browsers] :
identifier[display] = keyword[False]
keyword[if] identifier[webbrowser] . identifier[_tryorder] :
keyword[if] identifier[webbrowser] . identifier[_tryorder] [ literal[int] ] keyword[in] identifier[console_browsers] :
identifier[display] = keyword[False]
identifier[self] . identifier[_display] = identifier[display]
keyword[return] identifier[self] . identifier[_display] | def display(self):
"""
Use a number of methods to guess if the default webbrowser will open in
the background as opposed to opening directly in the terminal.
"""
if self._display is None:
if sys.platform == 'darwin':
# OS X won't set $DISPLAY unless xQuartz is installed.
# If you're using OS X and you want to access a terminal
# browser, you need to set it manually via $BROWSER.
# See issue #166
display = True # depends on [control=['if'], data=[]]
else:
display = bool(os.environ.get('DISPLAY'))
# Use the convention defined here to parse $BROWSER
# https://docs.python.org/2/library/webbrowser.html
console_browsers = ['www-browser', 'links', 'links2', 'elinks', 'lynx', 'w3m']
if 'BROWSER' in os.environ:
user_browser = os.environ['BROWSER'].split(os.pathsep)[0]
if user_browser in console_browsers:
display = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if webbrowser._tryorder:
if webbrowser._tryorder[0] in console_browsers:
display = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self._display = display # depends on [control=['if'], data=[]]
return self._display |
def init_with_context(self, context):
"""
Initializes the icon list.
"""
super(CmsAppIconList, self).init_with_context(context)
apps = self.children
cms_apps = [a for a in apps if is_cms_app(a['name'])]
non_cms_apps = [a for a in apps if a not in cms_apps]
if cms_apps:
# Group the models of all CMS apps in one group.
cms_models = []
for app in cms_apps:
cms_models += app['models']
sort_cms_models(cms_models)
single_cms_app = {'name': "Modules", 'title': "Modules", 'url': "", 'models': cms_models}
# Put remaining groups after the first CMS group.
self.children = [single_cms_app] + non_cms_apps | def function[init_with_context, parameter[self, context]]:
constant[
Initializes the icon list.
]
call[call[name[super], parameter[name[CmsAppIconList], name[self]]].init_with_context, parameter[name[context]]]
variable[apps] assign[=] name[self].children
variable[cms_apps] assign[=] <ast.ListComp object at 0x7da20c6a9390>
variable[non_cms_apps] assign[=] <ast.ListComp object at 0x7da20c6ab520>
if name[cms_apps] begin[:]
variable[cms_models] assign[=] list[[]]
for taget[name[app]] in starred[name[cms_apps]] begin[:]
<ast.AugAssign object at 0x7da20c6a9840>
call[name[sort_cms_models], parameter[name[cms_models]]]
variable[single_cms_app] assign[=] dictionary[[<ast.Constant object at 0x7da207f98790>, <ast.Constant object at 0x7da207f99150>, <ast.Constant object at 0x7da207f99990>, <ast.Constant object at 0x7da207f9b400>], [<ast.Constant object at 0x7da207f9b700>, <ast.Constant object at 0x7da207f9a950>, <ast.Constant object at 0x7da207f99960>, <ast.Name object at 0x7da207f9a740>]]
name[self].children assign[=] binary_operation[list[[<ast.Name object at 0x7da207f9a110>]] + name[non_cms_apps]] | keyword[def] identifier[init_with_context] ( identifier[self] , identifier[context] ):
literal[string]
identifier[super] ( identifier[CmsAppIconList] , identifier[self] ). identifier[init_with_context] ( identifier[context] )
identifier[apps] = identifier[self] . identifier[children]
identifier[cms_apps] =[ identifier[a] keyword[for] identifier[a] keyword[in] identifier[apps] keyword[if] identifier[is_cms_app] ( identifier[a] [ literal[string] ])]
identifier[non_cms_apps] =[ identifier[a] keyword[for] identifier[a] keyword[in] identifier[apps] keyword[if] identifier[a] keyword[not] keyword[in] identifier[cms_apps] ]
keyword[if] identifier[cms_apps] :
identifier[cms_models] =[]
keyword[for] identifier[app] keyword[in] identifier[cms_apps] :
identifier[cms_models] += identifier[app] [ literal[string] ]
identifier[sort_cms_models] ( identifier[cms_models] )
identifier[single_cms_app] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[cms_models] }
identifier[self] . identifier[children] =[ identifier[single_cms_app] ]+ identifier[non_cms_apps] | def init_with_context(self, context):
"""
Initializes the icon list.
"""
super(CmsAppIconList, self).init_with_context(context)
apps = self.children
cms_apps = [a for a in apps if is_cms_app(a['name'])]
non_cms_apps = [a for a in apps if a not in cms_apps]
if cms_apps:
# Group the models of all CMS apps in one group.
cms_models = []
for app in cms_apps:
cms_models += app['models'] # depends on [control=['for'], data=['app']]
sort_cms_models(cms_models)
single_cms_app = {'name': 'Modules', 'title': 'Modules', 'url': '', 'models': cms_models}
# Put remaining groups after the first CMS group.
self.children = [single_cms_app] + non_cms_apps # depends on [control=['if'], data=[]] |
def AppendSource(self, type_indicator, attributes):
"""Appends a source.
If you want to implement your own source type you should create a subclass
in source_type.py and change the AppendSource method to handle the new
subclass. This function raises FormatError if an unsupported source type
indicator is encountered.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
"""
if not type_indicator:
raise errors.FormatError('Missing type indicator.')
try:
source_object = registry.ArtifactDefinitionsRegistry.CreateSourceType(
type_indicator, attributes)
except (AttributeError, TypeError) as exception:
raise errors.FormatError((
'Unable to create source type: {0:s} for artifact definition: {1:s} '
'with error: {2!s}').format(type_indicator, self.name, exception))
self.sources.append(source_object)
return source_object | def function[AppendSource, parameter[self, type_indicator, attributes]]:
constant[Appends a source.
If you want to implement your own source type you should create a subclass
in source_type.py and change the AppendSource method to handle the new
subclass. This function raises FormatError if an unsupported source type
indicator is encountered.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
]
if <ast.UnaryOp object at 0x7da1b1d38af0> begin[:]
<ast.Raise object at 0x7da1b1d3b3a0>
<ast.Try object at 0x7da1b1d3be50>
call[name[self].sources.append, parameter[name[source_object]]]
return[name[source_object]] | keyword[def] identifier[AppendSource] ( identifier[self] , identifier[type_indicator] , identifier[attributes] ):
literal[string]
keyword[if] keyword[not] identifier[type_indicator] :
keyword[raise] identifier[errors] . identifier[FormatError] ( literal[string] )
keyword[try] :
identifier[source_object] = identifier[registry] . identifier[ArtifactDefinitionsRegistry] . identifier[CreateSourceType] (
identifier[type_indicator] , identifier[attributes] )
keyword[except] ( identifier[AttributeError] , identifier[TypeError] ) keyword[as] identifier[exception] :
keyword[raise] identifier[errors] . identifier[FormatError] ((
literal[string]
literal[string] ). identifier[format] ( identifier[type_indicator] , identifier[self] . identifier[name] , identifier[exception] ))
identifier[self] . identifier[sources] . identifier[append] ( identifier[source_object] )
keyword[return] identifier[source_object] | def AppendSource(self, type_indicator, attributes):
"""Appends a source.
If you want to implement your own source type you should create a subclass
in source_type.py and change the AppendSource method to handle the new
subclass. This function raises FormatError if an unsupported source type
indicator is encountered.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
"""
if not type_indicator:
raise errors.FormatError('Missing type indicator.') # depends on [control=['if'], data=[]]
try:
source_object = registry.ArtifactDefinitionsRegistry.CreateSourceType(type_indicator, attributes) # depends on [control=['try'], data=[]]
except (AttributeError, TypeError) as exception:
raise errors.FormatError('Unable to create source type: {0:s} for artifact definition: {1:s} with error: {2!s}'.format(type_indicator, self.name, exception)) # depends on [control=['except'], data=['exception']]
self.sources.append(source_object)
return source_object |
def run_hook(self, app: FlaskUnchained, bundles: List[Bundle]):
"""
Hook entry point. Override to disable standard behavior of iterating
over bundles to discover objects and processing them.
"""
self.process_objects(app, self.collect_from_bundles(bundles)) | def function[run_hook, parameter[self, app, bundles]]:
constant[
Hook entry point. Override to disable standard behavior of iterating
over bundles to discover objects and processing them.
]
call[name[self].process_objects, parameter[name[app], call[name[self].collect_from_bundles, parameter[name[bundles]]]]] | keyword[def] identifier[run_hook] ( identifier[self] , identifier[app] : identifier[FlaskUnchained] , identifier[bundles] : identifier[List] [ identifier[Bundle] ]):
literal[string]
identifier[self] . identifier[process_objects] ( identifier[app] , identifier[self] . identifier[collect_from_bundles] ( identifier[bundles] )) | def run_hook(self, app: FlaskUnchained, bundles: List[Bundle]):
"""
Hook entry point. Override to disable standard behavior of iterating
over bundles to discover objects and processing them.
"""
self.process_objects(app, self.collect_from_bundles(bundles)) |
def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
"""
param_type, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name, [
_cast_to_type_if_compatible(name, param_type, v) for v in value])
else:
if is_list:
raise ValueError(
'Must pass a list for multi-valued parameter: %s.' % name)
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value)) | def function[set_hparam, parameter[self, name, value]]:
constant[Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
]
<ast.Tuple object at 0x7da1b1ff2500> assign[=] call[name[self]._hparam_types][name[name]]
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1ff3cd0> begin[:]
<ast.Raise object at 0x7da1b1ff1a50>
call[name[setattr], parameter[name[self], name[name], <ast.ListComp object at 0x7da1b1ff0550>]] | keyword[def] identifier[set_hparam] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
identifier[param_type] , identifier[is_list] = identifier[self] . identifier[_hparam_types] [ identifier[name] ]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[if] keyword[not] identifier[is_list] :
keyword[raise] identifier[ValueError] (
literal[string] % identifier[name] )
identifier[setattr] ( identifier[self] , identifier[name] ,[
identifier[_cast_to_type_if_compatible] ( identifier[name] , identifier[param_type] , identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[value] ])
keyword[else] :
keyword[if] identifier[is_list] :
keyword[raise] identifier[ValueError] (
literal[string] % identifier[name] )
identifier[setattr] ( identifier[self] , identifier[name] , identifier[_cast_to_type_if_compatible] ( identifier[name] , identifier[param_type] , identifier[value] )) | def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
"""
(param_type, is_list) = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError('Must not pass a list for single-valued parameter: %s' % name) # depends on [control=['if'], data=[]]
setattr(self, name, [_cast_to_type_if_compatible(name, param_type, v) for v in value]) # depends on [control=['if'], data=[]]
else:
if is_list:
raise ValueError('Must pass a list for multi-valued parameter: %s.' % name) # depends on [control=['if'], data=[]]
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value)) |
def classify_regions(dataset, masks, method='ERF', threshold=0.08,
remove_overlap=True, regularization='scale',
output='summary', studies=None, features=None,
class_weight='auto', classifier=None,
cross_val='4-Fold', param_grid=None, scoring='accuracy'):
""" Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
maks: a list of paths to Nifti masks
method: a string indicating which method to used.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
classifier: An optional sci-kit learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
"""
(X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap,
studies, features,
regularization=regularization)
return classify(X, y, method, classifier, output, cross_val,
class_weight, scoring=scoring, param_grid=param_grid) | def function[classify_regions, parameter[dataset, masks, method, threshold, remove_overlap, regularization, output, studies, features, class_weight, classifier, cross_val, param_grid, scoring]]:
constant[ Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
maks: a list of paths to Nifti masks
method: a string indicating which method to used.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
classifier: An optional sci-kit learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
]
<ast.Tuple object at 0x7da204622680> assign[=] call[name[get_studies_by_regions], parameter[name[dataset], name[masks], name[threshold], name[remove_overlap], name[studies], name[features]]]
return[call[name[classify], parameter[name[X], name[y], name[method], name[classifier], name[output], name[cross_val], name[class_weight]]]] | keyword[def] identifier[classify_regions] ( identifier[dataset] , identifier[masks] , identifier[method] = literal[string] , identifier[threshold] = literal[int] ,
identifier[remove_overlap] = keyword[True] , identifier[regularization] = literal[string] ,
identifier[output] = literal[string] , identifier[studies] = keyword[None] , identifier[features] = keyword[None] ,
identifier[class_weight] = literal[string] , identifier[classifier] = keyword[None] ,
identifier[cross_val] = literal[string] , identifier[param_grid] = keyword[None] , identifier[scoring] = literal[string] ):
literal[string]
( identifier[X] , identifier[y] )= identifier[get_studies_by_regions] ( identifier[dataset] , identifier[masks] , identifier[threshold] , identifier[remove_overlap] ,
identifier[studies] , identifier[features] ,
identifier[regularization] = identifier[regularization] )
keyword[return] identifier[classify] ( identifier[X] , identifier[y] , identifier[method] , identifier[classifier] , identifier[output] , identifier[cross_val] ,
identifier[class_weight] , identifier[scoring] = identifier[scoring] , identifier[param_grid] = identifier[param_grid] ) | def classify_regions(dataset, masks, method='ERF', threshold=0.08, remove_overlap=True, regularization='scale', output='summary', studies=None, features=None, class_weight='auto', classifier=None, cross_val='4-Fold', param_grid=None, scoring='accuracy'):
""" Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
maks: a list of paths to Nifti masks
method: a string indicating which method to used.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
classifier: An optional sci-kit learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
Can also pass a scikit_classifier
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
A tuple (X, y) of np arrays.
X is a feature by studies matrix and y is a vector of class labels
"""
(X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap, studies, features, regularization=regularization)
return classify(X, y, method, classifier, output, cross_val, class_weight, scoring=scoring, param_grid=param_grid) |
def user_exists(name,
user=None, host=None, port=None, maintenance_db=None,
password=None,
runas=None):
'''
Checks if a user exists on the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.user_exists 'username'
'''
return bool(
role_get(name,
user=user,
host=host,
port=port,
maintenance_db=maintenance_db,
password=password,
runas=runas,
return_password=False)) | def function[user_exists, parameter[name, user, host, port, maintenance_db, password, runas]]:
constant[
Checks if a user exists on the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.user_exists 'username'
]
return[call[name[bool], parameter[call[name[role_get], parameter[name[name]]]]]] | keyword[def] identifier[user_exists] ( identifier[name] ,
identifier[user] = keyword[None] , identifier[host] = keyword[None] , identifier[port] = keyword[None] , identifier[maintenance_db] = keyword[None] ,
identifier[password] = keyword[None] ,
identifier[runas] = keyword[None] ):
literal[string]
keyword[return] identifier[bool] (
identifier[role_get] ( identifier[name] ,
identifier[user] = identifier[user] ,
identifier[host] = identifier[host] ,
identifier[port] = identifier[port] ,
identifier[maintenance_db] = identifier[maintenance_db] ,
identifier[password] = identifier[password] ,
identifier[runas] = identifier[runas] ,
identifier[return_password] = keyword[False] )) | def user_exists(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None):
"""
Checks if a user exists on the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.user_exists 'username'
"""
return bool(role_get(name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas, return_password=False)) |
def list_opts():
"""Returns a list of oslo_config options available in the library.
The returned list includes all oslo_config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
from tvrenamer.common import tools
all_opts = []
all_opts.extend(tools.make_opt_list([CLI_OPTS,
EPISODE_OPTS,
FORMAT_OPTS], None))
all_opts.extend(tools.make_opt_list([CACHE_OPTS], 'cache'))
return all_opts | def function[list_opts, parameter[]]:
constant[Returns a list of oslo_config options available in the library.
The returned list includes all oslo_config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
]
from relative_module[tvrenamer.common] import module[tools]
variable[all_opts] assign[=] list[[]]
call[name[all_opts].extend, parameter[call[name[tools].make_opt_list, parameter[list[[<ast.Name object at 0x7da1b0abb670>, <ast.Name object at 0x7da1b0abb160>, <ast.Name object at 0x7da1b0ab9e10>]], constant[None]]]]]
call[name[all_opts].extend, parameter[call[name[tools].make_opt_list, parameter[list[[<ast.Name object at 0x7da1b0abb9d0>]], constant[cache]]]]]
return[name[all_opts]] | keyword[def] identifier[list_opts] ():
literal[string]
keyword[from] identifier[tvrenamer] . identifier[common] keyword[import] identifier[tools]
identifier[all_opts] =[]
identifier[all_opts] . identifier[extend] ( identifier[tools] . identifier[make_opt_list] ([ identifier[CLI_OPTS] ,
identifier[EPISODE_OPTS] ,
identifier[FORMAT_OPTS] ], keyword[None] ))
identifier[all_opts] . identifier[extend] ( identifier[tools] . identifier[make_opt_list] ([ identifier[CACHE_OPTS] ], literal[string] ))
keyword[return] identifier[all_opts] | def list_opts():
"""Returns a list of oslo_config options available in the library.
The returned list includes all oslo_config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
from tvrenamer.common import tools
all_opts = []
all_opts.extend(tools.make_opt_list([CLI_OPTS, EPISODE_OPTS, FORMAT_OPTS], None))
all_opts.extend(tools.make_opt_list([CACHE_OPTS], 'cache'))
return all_opts |
def prod_sum_var(A, B):
"""dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1)
"""
return A.multiply(B).sum(1).A1 if issparse(A) else np.einsum('ij, ij -> i', A, B) | def function[prod_sum_var, parameter[A, B]]:
constant[dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1)
]
return[<ast.IfExp object at 0x7da20e954820>] | keyword[def] identifier[prod_sum_var] ( identifier[A] , identifier[B] ):
literal[string]
keyword[return] identifier[A] . identifier[multiply] ( identifier[B] ). identifier[sum] ( literal[int] ). identifier[A1] keyword[if] identifier[issparse] ( identifier[A] ) keyword[else] identifier[np] . identifier[einsum] ( literal[string] , identifier[A] , identifier[B] ) | def prod_sum_var(A, B):
"""dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1)
"""
return A.multiply(B).sum(1).A1 if issparse(A) else np.einsum('ij, ij -> i', A, B) |
def _try_run(self, run_func: Callable[[], None]) -> None:
"""
Try running the given function (training/prediction).
Calls
- :py:meth:`cxflow.hooks.AbstractHook.before_training`
- :py:meth:`cxflow.hooks.AbstractHook.after_training`
:param run_func: function to be run
"""
# Initialization: before_training
for hook in self._hooks:
hook.before_training()
try:
run_func()
except TrainingTerminated as ex:
logging.info('Training terminated: %s', ex)
# After training: after_training
for hook in self._hooks:
hook.after_training() | def function[_try_run, parameter[self, run_func]]:
constant[
Try running the given function (training/prediction).
Calls
- :py:meth:`cxflow.hooks.AbstractHook.before_training`
- :py:meth:`cxflow.hooks.AbstractHook.after_training`
:param run_func: function to be run
]
for taget[name[hook]] in starred[name[self]._hooks] begin[:]
call[name[hook].before_training, parameter[]]
<ast.Try object at 0x7da20c6a9f30>
for taget[name[hook]] in starred[name[self]._hooks] begin[:]
call[name[hook].after_training, parameter[]] | keyword[def] identifier[_try_run] ( identifier[self] , identifier[run_func] : identifier[Callable] [[], keyword[None] ])-> keyword[None] :
literal[string]
keyword[for] identifier[hook] keyword[in] identifier[self] . identifier[_hooks] :
identifier[hook] . identifier[before_training] ()
keyword[try] :
identifier[run_func] ()
keyword[except] identifier[TrainingTerminated] keyword[as] identifier[ex] :
identifier[logging] . identifier[info] ( literal[string] , identifier[ex] )
keyword[for] identifier[hook] keyword[in] identifier[self] . identifier[_hooks] :
identifier[hook] . identifier[after_training] () | def _try_run(self, run_func: Callable[[], None]) -> None:
"""
Try running the given function (training/prediction).
Calls
- :py:meth:`cxflow.hooks.AbstractHook.before_training`
- :py:meth:`cxflow.hooks.AbstractHook.after_training`
:param run_func: function to be run
"""
# Initialization: before_training
for hook in self._hooks:
hook.before_training() # depends on [control=['for'], data=['hook']]
try:
run_func() # depends on [control=['try'], data=[]]
except TrainingTerminated as ex:
logging.info('Training terminated: %s', ex) # depends on [control=['except'], data=['ex']]
# After training: after_training
for hook in self._hooks:
hook.after_training() # depends on [control=['for'], data=['hook']] |
def _tristate_parent(self, item):
"""
Put the box of item in tristate and change the state of the boxes of
item's ancestors accordingly.
"""
self.change_state(item, "tristate")
parent = self.parent(item)
if parent:
self._tristate_parent(parent) | def function[_tristate_parent, parameter[self, item]]:
constant[
Put the box of item in tristate and change the state of the boxes of
item's ancestors accordingly.
]
call[name[self].change_state, parameter[name[item], constant[tristate]]]
variable[parent] assign[=] call[name[self].parent, parameter[name[item]]]
if name[parent] begin[:]
call[name[self]._tristate_parent, parameter[name[parent]]] | keyword[def] identifier[_tristate_parent] ( identifier[self] , identifier[item] ):
literal[string]
identifier[self] . identifier[change_state] ( identifier[item] , literal[string] )
identifier[parent] = identifier[self] . identifier[parent] ( identifier[item] )
keyword[if] identifier[parent] :
identifier[self] . identifier[_tristate_parent] ( identifier[parent] ) | def _tristate_parent(self, item):
"""
Put the box of item in tristate and change the state of the boxes of
item's ancestors accordingly.
"""
self.change_state(item, 'tristate')
parent = self.parent(item)
if parent:
self._tristate_parent(parent) # depends on [control=['if'], data=[]] |
def add_suffix(in_files, suffix):
"""
Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz'
"""
import os.path as op
from nipype.utils.filemanip import fname_presuffix, filename_to_list
return op.basename(fname_presuffix(filename_to_list(in_files)[0],
suffix=suffix)) | def function[add_suffix, parameter[in_files, suffix]]:
constant[
Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz'
]
import module[os.path] as alias[op]
from relative_module[nipype.utils.filemanip] import module[fname_presuffix], module[filename_to_list]
return[call[name[op].basename, parameter[call[name[fname_presuffix], parameter[call[call[name[filename_to_list], parameter[name[in_files]]]][constant[0]]]]]]] | keyword[def] identifier[add_suffix] ( identifier[in_files] , identifier[suffix] ):
literal[string]
keyword[import] identifier[os] . identifier[path] keyword[as] identifier[op]
keyword[from] identifier[nipype] . identifier[utils] . identifier[filemanip] keyword[import] identifier[fname_presuffix] , identifier[filename_to_list]
keyword[return] identifier[op] . identifier[basename] ( identifier[fname_presuffix] ( identifier[filename_to_list] ( identifier[in_files] )[ literal[int] ],
identifier[suffix] = identifier[suffix] )) | def add_suffix(in_files, suffix):
"""
Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz'
"""
import os.path as op
from nipype.utils.filemanip import fname_presuffix, filename_to_list
return op.basename(fname_presuffix(filename_to_list(in_files)[0], suffix=suffix)) |
def generate_observation_from_state(self, state_index):
"""
Generate a single synthetic observation data from a given state.
Parameters
----------
state_index : int
Index of the state from which observations are to be generated.
Returns
-------
observation : float
A single observation from the given state.
Examples
--------
Generate an observation model.
>>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
Generate sample from each state.
>>> observation = output_model.generate_observation_from_state(0)
"""
# generate random generator (note that this is inefficient - better use one of the next functions
import scipy.stats
gen = scipy.stats.rv_discrete(values=(range(len(self._output_probabilities[state_index])),
self._output_probabilities[state_index]))
gen.rvs(size=1) | def function[generate_observation_from_state, parameter[self, state_index]]:
constant[
Generate a single synthetic observation data from a given state.
Parameters
----------
state_index : int
Index of the state from which observations are to be generated.
Returns
-------
observation : float
A single observation from the given state.
Examples
--------
Generate an observation model.
>>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
Generate sample from each state.
>>> observation = output_model.generate_observation_from_state(0)
]
import module[scipy.stats]
variable[gen] assign[=] call[name[scipy].stats.rv_discrete, parameter[]]
call[name[gen].rvs, parameter[]] | keyword[def] identifier[generate_observation_from_state] ( identifier[self] , identifier[state_index] ):
literal[string]
keyword[import] identifier[scipy] . identifier[stats]
identifier[gen] = identifier[scipy] . identifier[stats] . identifier[rv_discrete] ( identifier[values] =( identifier[range] ( identifier[len] ( identifier[self] . identifier[_output_probabilities] [ identifier[state_index] ])),
identifier[self] . identifier[_output_probabilities] [ identifier[state_index] ]))
identifier[gen] . identifier[rvs] ( identifier[size] = literal[int] ) | def generate_observation_from_state(self, state_index):
"""
Generate a single synthetic observation data from a given state.
Parameters
----------
state_index : int
Index of the state from which observations are to be generated.
Returns
-------
observation : float
A single observation from the given state.
Examples
--------
Generate an observation model.
>>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
Generate sample from each state.
>>> observation = output_model.generate_observation_from_state(0)
"""
# generate random generator (note that this is inefficient - better use one of the next functions
import scipy.stats
gen = scipy.stats.rv_discrete(values=(range(len(self._output_probabilities[state_index])), self._output_probabilities[state_index]))
gen.rvs(size=1) |
def add_job_to_context(context, job_id):
"""Adds job to neutron context for use later."""
db_job = db_api.async_transaction_find(
context, id=job_id, scope=db_api.ONE)
if not db_job:
return
context.async_job = {"job": v._make_job_dict(db_job)} | def function[add_job_to_context, parameter[context, job_id]]:
constant[Adds job to neutron context for use later.]
variable[db_job] assign[=] call[name[db_api].async_transaction_find, parameter[name[context]]]
if <ast.UnaryOp object at 0x7da1b2344370> begin[:]
return[None]
name[context].async_job assign[=] dictionary[[<ast.Constant object at 0x7da1b23440a0>], [<ast.Call object at 0x7da1b2345b70>]] | keyword[def] identifier[add_job_to_context] ( identifier[context] , identifier[job_id] ):
literal[string]
identifier[db_job] = identifier[db_api] . identifier[async_transaction_find] (
identifier[context] , identifier[id] = identifier[job_id] , identifier[scope] = identifier[db_api] . identifier[ONE] )
keyword[if] keyword[not] identifier[db_job] :
keyword[return]
identifier[context] . identifier[async_job] ={ literal[string] : identifier[v] . identifier[_make_job_dict] ( identifier[db_job] )} | def add_job_to_context(context, job_id):
"""Adds job to neutron context for use later."""
db_job = db_api.async_transaction_find(context, id=job_id, scope=db_api.ONE)
if not db_job:
return # depends on [control=['if'], data=[]]
context.async_job = {'job': v._make_job_dict(db_job)} |
def _request(request, request_fallback=None):
''' Extract request fields wherever they may come from: GET, POST, forms, fallback '''
# Use lambdas to avoid evaluating bottle.request.* which may throw an Error
all_dicts = [
lambda: request.json,
lambda: request.forms,
lambda: request.query,
lambda: request.files,
#lambda: request.POST,
lambda: request_fallback
]
request_dict = dict()
for req_dict_ in all_dicts:
try:
req_dict = req_dict_()
except KeyError:
continue
if req_dict is not None and hasattr(req_dict, 'items'):
for req_key, req_val in req_dict.items():
request_dict[req_key] = req_val
return request_dict | def function[_request, parameter[request, request_fallback]]:
constant[ Extract request fields wherever they may come from: GET, POST, forms, fallback ]
variable[all_dicts] assign[=] list[[<ast.Lambda object at 0x7da1b1415270>, <ast.Lambda object at 0x7da1b1416770>, <ast.Lambda object at 0x7da1b1640880>, <ast.Lambda object at 0x7da1b1643340>, <ast.Lambda object at 0x7da1b16401c0>]]
variable[request_dict] assign[=] call[name[dict], parameter[]]
for taget[name[req_dict_]] in starred[name[all_dicts]] begin[:]
<ast.Try object at 0x7da1b1640ca0>
if <ast.BoolOp object at 0x7da1b16428f0> begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1641510>, <ast.Name object at 0x7da1b1642ad0>]]] in starred[call[name[req_dict].items, parameter[]]] begin[:]
call[name[request_dict]][name[req_key]] assign[=] name[req_val]
return[name[request_dict]] | keyword[def] identifier[_request] ( identifier[request] , identifier[request_fallback] = keyword[None] ):
literal[string]
identifier[all_dicts] =[
keyword[lambda] : identifier[request] . identifier[json] ,
keyword[lambda] : identifier[request] . identifier[forms] ,
keyword[lambda] : identifier[request] . identifier[query] ,
keyword[lambda] : identifier[request] . identifier[files] ,
keyword[lambda] : identifier[request_fallback]
]
identifier[request_dict] = identifier[dict] ()
keyword[for] identifier[req_dict_] keyword[in] identifier[all_dicts] :
keyword[try] :
identifier[req_dict] = identifier[req_dict_] ()
keyword[except] identifier[KeyError] :
keyword[continue]
keyword[if] identifier[req_dict] keyword[is] keyword[not] keyword[None] keyword[and] identifier[hasattr] ( identifier[req_dict] , literal[string] ):
keyword[for] identifier[req_key] , identifier[req_val] keyword[in] identifier[req_dict] . identifier[items] ():
identifier[request_dict] [ identifier[req_key] ]= identifier[req_val]
keyword[return] identifier[request_dict] | def _request(request, request_fallback=None):
""" Extract request fields wherever they may come from: GET, POST, forms, fallback """
# Use lambdas to avoid evaluating bottle.request.* which may throw an Error
#lambda: request.POST,
all_dicts = [lambda : request.json, lambda : request.forms, lambda : request.query, lambda : request.files, lambda : request_fallback]
request_dict = dict()
for req_dict_ in all_dicts:
try:
req_dict = req_dict_() # depends on [control=['try'], data=[]]
except KeyError:
continue # depends on [control=['except'], data=[]]
if req_dict is not None and hasattr(req_dict, 'items'):
for (req_key, req_val) in req_dict.items():
request_dict[req_key] = req_val # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['req_dict_']]
return request_dict |
def load_df(self, df):
'''
Load Pandas DataFrame.
'''
# self.__init__()
self.reset()
df_dict = {}
df_dict['mat'] = deepcopy(df)
# always define category colors if applicable when loading a df
data_formats.df_to_dat(self, df_dict, define_cat_colors=True) | def function[load_df, parameter[self, df]]:
constant[
Load Pandas DataFrame.
]
call[name[self].reset, parameter[]]
variable[df_dict] assign[=] dictionary[[], []]
call[name[df_dict]][constant[mat]] assign[=] call[name[deepcopy], parameter[name[df]]]
call[name[data_formats].df_to_dat, parameter[name[self], name[df_dict]]] | keyword[def] identifier[load_df] ( identifier[self] , identifier[df] ):
literal[string]
identifier[self] . identifier[reset] ()
identifier[df_dict] ={}
identifier[df_dict] [ literal[string] ]= identifier[deepcopy] ( identifier[df] )
identifier[data_formats] . identifier[df_to_dat] ( identifier[self] , identifier[df_dict] , identifier[define_cat_colors] = keyword[True] ) | def load_df(self, df):
"""
Load Pandas DataFrame.
"""
# self.__init__()
self.reset()
df_dict = {}
df_dict['mat'] = deepcopy(df)
# always define category colors if applicable when loading a df
data_formats.df_to_dat(self, df_dict, define_cat_colors=True) |
def drop_table(self, dbname, name, deleteData):
"""
Parameters:
- dbname
- name
- deleteData
"""
self.send_drop_table(dbname, name, deleteData)
self.recv_drop_table() | def function[drop_table, parameter[self, dbname, name, deleteData]]:
constant[
Parameters:
- dbname
- name
- deleteData
]
call[name[self].send_drop_table, parameter[name[dbname], name[name], name[deleteData]]]
call[name[self].recv_drop_table, parameter[]] | keyword[def] identifier[drop_table] ( identifier[self] , identifier[dbname] , identifier[name] , identifier[deleteData] ):
literal[string]
identifier[self] . identifier[send_drop_table] ( identifier[dbname] , identifier[name] , identifier[deleteData] )
identifier[self] . identifier[recv_drop_table] () | def drop_table(self, dbname, name, deleteData):
"""
Parameters:
- dbname
- name
- deleteData
"""
self.send_drop_table(dbname, name, deleteData)
self.recv_drop_table() |
def check_bed_file(fname):
""" Check if the inputfile is a valid bed-file """
if not os.path.exists(fname):
logger.error("Inputfile %s does not exist!", fname)
sys.exit(1)
for i, line in enumerate(open(fname)):
if line.startswith("#") or line.startswith("track") or line.startswith("browser"):
# comment or BED specific stuff
pass
else:
vals = line.strip().split("\t")
if len(vals) < 3:
logger.error("Expecting tab-seperated values (chromosome<tab>start<tab>end) on line %s of file %s", i + 1, fname)
sys.exit(1)
try:
start, end = int(vals[1]), int(vals[2])
except ValueError:
logger.error("No valid integer coordinates on line %s of file %s", i + 1, fname)
sys.exit(1)
if len(vals) > 3:
try:
float(vals[3])
except ValueError:
pass | def function[check_bed_file, parameter[fname]]:
constant[ Check if the inputfile is a valid bed-file ]
if <ast.UnaryOp object at 0x7da1b10b19f0> begin[:]
call[name[logger].error, parameter[constant[Inputfile %s does not exist!], name[fname]]]
call[name[sys].exit, parameter[constant[1]]]
for taget[tuple[[<ast.Name object at 0x7da1b10b1c60>, <ast.Name object at 0x7da1b10b0d00>]]] in starred[call[name[enumerate], parameter[call[name[open], parameter[name[fname]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b10b0e50> begin[:]
pass | keyword[def] identifier[check_bed_file] ( identifier[fname] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[fname] ):
identifier[logger] . identifier[error] ( literal[string] , identifier[fname] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[open] ( identifier[fname] )):
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ) keyword[or] identifier[line] . identifier[startswith] ( literal[string] ) keyword[or] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[pass]
keyword[else] :
identifier[vals] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[vals] )< literal[int] :
identifier[logger] . identifier[error] ( literal[string] , identifier[i] + literal[int] , identifier[fname] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[try] :
identifier[start] , identifier[end] = identifier[int] ( identifier[vals] [ literal[int] ]), identifier[int] ( identifier[vals] [ literal[int] ])
keyword[except] identifier[ValueError] :
identifier[logger] . identifier[error] ( literal[string] , identifier[i] + literal[int] , identifier[fname] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[len] ( identifier[vals] )> literal[int] :
keyword[try] :
identifier[float] ( identifier[vals] [ literal[int] ])
keyword[except] identifier[ValueError] :
keyword[pass] | def check_bed_file(fname):
""" Check if the inputfile is a valid bed-file """
if not os.path.exists(fname):
logger.error('Inputfile %s does not exist!', fname)
sys.exit(1) # depends on [control=['if'], data=[]]
for (i, line) in enumerate(open(fname)):
if line.startswith('#') or line.startswith('track') or line.startswith('browser'):
# comment or BED specific stuff
pass # depends on [control=['if'], data=[]]
else:
vals = line.strip().split('\t')
if len(vals) < 3:
logger.error('Expecting tab-seperated values (chromosome<tab>start<tab>end) on line %s of file %s', i + 1, fname)
sys.exit(1) # depends on [control=['if'], data=[]]
try:
(start, end) = (int(vals[1]), int(vals[2])) # depends on [control=['try'], data=[]]
except ValueError:
logger.error('No valid integer coordinates on line %s of file %s', i + 1, fname)
sys.exit(1) # depends on [control=['except'], data=[]]
if len(vals) > 3:
try:
float(vals[3]) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def add_timeframed_query_manager(sender, **kwargs):
"""
Add a QueryManager for a specific timeframe.
"""
if not issubclass(sender, TimeFramedModel):
return
if _field_exists(sender, 'timeframed'):
raise ImproperlyConfigured(
"Model '%s' has a field named 'timeframed' "
"which conflicts with the TimeFramedModel manager."
% sender.__name__
)
sender.add_to_class('timeframed', QueryManager(
(models.Q(start__lte=now) | models.Q(start__isnull=True)) &
(models.Q(end__gte=now) | models.Q(end__isnull=True))
)) | def function[add_timeframed_query_manager, parameter[sender]]:
constant[
Add a QueryManager for a specific timeframe.
]
if <ast.UnaryOp object at 0x7da1b163bf40> begin[:]
return[None]
if call[name[_field_exists], parameter[name[sender], constant[timeframed]]] begin[:]
<ast.Raise object at 0x7da1b163aaa0>
call[name[sender].add_to_class, parameter[constant[timeframed], call[name[QueryManager], parameter[binary_operation[binary_operation[call[name[models].Q, parameter[]] <ast.BitOr object at 0x7da2590d6aa0> call[name[models].Q, parameter[]]] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[call[name[models].Q, parameter[]] <ast.BitOr object at 0x7da2590d6aa0> call[name[models].Q, parameter[]]]]]]]] | keyword[def] identifier[add_timeframed_query_manager] ( identifier[sender] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[issubclass] ( identifier[sender] , identifier[TimeFramedModel] ):
keyword[return]
keyword[if] identifier[_field_exists] ( identifier[sender] , literal[string] ):
keyword[raise] identifier[ImproperlyConfigured] (
literal[string]
literal[string]
% identifier[sender] . identifier[__name__]
)
identifier[sender] . identifier[add_to_class] ( literal[string] , identifier[QueryManager] (
( identifier[models] . identifier[Q] ( identifier[start__lte] = identifier[now] )| identifier[models] . identifier[Q] ( identifier[start__isnull] = keyword[True] ))&
( identifier[models] . identifier[Q] ( identifier[end__gte] = identifier[now] )| identifier[models] . identifier[Q] ( identifier[end__isnull] = keyword[True] ))
)) | def add_timeframed_query_manager(sender, **kwargs):
"""
Add a QueryManager for a specific timeframe.
"""
if not issubclass(sender, TimeFramedModel):
return # depends on [control=['if'], data=[]]
if _field_exists(sender, 'timeframed'):
raise ImproperlyConfigured("Model '%s' has a field named 'timeframed' which conflicts with the TimeFramedModel manager." % sender.__name__) # depends on [control=['if'], data=[]]
sender.add_to_class('timeframed', QueryManager((models.Q(start__lte=now) | models.Q(start__isnull=True)) & (models.Q(end__gte=now) | models.Q(end__isnull=True)))) |
def create_zap_package(cls, package_name, recursive=True):
"""Convenience constructor for a package zap rule.
Essentially equivalent to just using ``shading_zap('package_name.**')``.
:param string package_name: Package name to remove (eg, ``org.pantsbuild.example``).
:param bool recursive: Whether to remove everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True).
"""
return cls.create_zap(cls._format_package_glob(package_name, recursive)) | def function[create_zap_package, parameter[cls, package_name, recursive]]:
constant[Convenience constructor for a package zap rule.
Essentially equivalent to just using ``shading_zap('package_name.**')``.
:param string package_name: Package name to remove (eg, ``org.pantsbuild.example``).
:param bool recursive: Whether to remove everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True).
]
return[call[name[cls].create_zap, parameter[call[name[cls]._format_package_glob, parameter[name[package_name], name[recursive]]]]]] | keyword[def] identifier[create_zap_package] ( identifier[cls] , identifier[package_name] , identifier[recursive] = keyword[True] ):
literal[string]
keyword[return] identifier[cls] . identifier[create_zap] ( identifier[cls] . identifier[_format_package_glob] ( identifier[package_name] , identifier[recursive] )) | def create_zap_package(cls, package_name, recursive=True):
"""Convenience constructor for a package zap rule.
Essentially equivalent to just using ``shading_zap('package_name.**')``.
:param string package_name: Package name to remove (eg, ``org.pantsbuild.example``).
:param bool recursive: Whether to remove everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True).
"""
return cls.create_zap(cls._format_package_glob(package_name, recursive)) |
def all(self, instance):
"""Get all ACLs associated with the instance specified by name.
:param str instance: The name of the instance from which to fetch ACLs.
:returns: A list of :py:class:`Acl` objects associated with the specified instance.
:rtype: list
"""
url = self._url.format(instance=instance)
response = requests.get(url, **self._default_request_kwargs)
data = self._get_response_data(response)
return self._concrete_acl_list(data) | def function[all, parameter[self, instance]]:
constant[Get all ACLs associated with the instance specified by name.
:param str instance: The name of the instance from which to fetch ACLs.
:returns: A list of :py:class:`Acl` objects associated with the specified instance.
:rtype: list
]
variable[url] assign[=] call[name[self]._url.format, parameter[]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
variable[data] assign[=] call[name[self]._get_response_data, parameter[name[response]]]
return[call[name[self]._concrete_acl_list, parameter[name[data]]]] | keyword[def] identifier[all] ( identifier[self] , identifier[instance] ):
literal[string]
identifier[url] = identifier[self] . identifier[_url] . identifier[format] ( identifier[instance] = identifier[instance] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] ,** identifier[self] . identifier[_default_request_kwargs] )
identifier[data] = identifier[self] . identifier[_get_response_data] ( identifier[response] )
keyword[return] identifier[self] . identifier[_concrete_acl_list] ( identifier[data] ) | def all(self, instance):
"""Get all ACLs associated with the instance specified by name.
:param str instance: The name of the instance from which to fetch ACLs.
:returns: A list of :py:class:`Acl` objects associated with the specified instance.
:rtype: list
"""
url = self._url.format(instance=instance)
response = requests.get(url, **self._default_request_kwargs)
data = self._get_response_data(response)
return self._concrete_acl_list(data) |
def visit_Module(self, node):
"""Turn globals assignment to functionDef and visit function defs. """
module_body = list()
symbols = set()
# Gather top level assigned variables.
for stmt in node.body:
if isinstance(stmt, (ast.Import, ast.ImportFrom)):
for alias in stmt.names:
name = alias.asname or alias.name
symbols.add(name) # no warning here
elif isinstance(stmt, ast.FunctionDef):
if stmt.name in symbols:
raise PythranSyntaxError(
"Multiple top-level definition of %s." % stmt.name,
stmt)
else:
symbols.add(stmt.name)
if not isinstance(stmt, ast.Assign):
continue
for target in stmt.targets:
if not isinstance(target, ast.Name):
raise PythranSyntaxError(
"Top-level assignment to an expression.",
target)
if target.id in self.to_expand:
raise PythranSyntaxError(
"Multiple top-level definition of %s." % target.id,
target)
if isinstance(stmt.value, ast.Name):
if stmt.value.id in symbols:
continue # create aliasing between top level symbols
self.to_expand.add(target.id)
for stmt in node.body:
if isinstance(stmt, ast.Assign):
# that's not a global var, but a module/function aliasing
if all(isinstance(t, ast.Name) and t.id not in self.to_expand
for t in stmt.targets):
module_body.append(stmt)
continue
self.local_decl = set()
cst_value = self.visit(stmt.value)
for target in stmt.targets:
assert isinstance(target, ast.Name)
module_body.append(
ast.FunctionDef(target.id,
ast.arguments([], None,
[], [], None, []),
[ast.Return(value=cst_value)],
[], None))
metadata.add(module_body[-1].body[0],
metadata.StaticReturn())
else:
self.local_decl = self.gather(
LocalNameDeclarations, stmt)
module_body.append(self.visit(stmt))
self.update |= bool(self.to_expand)
node.body = module_body
return node | def function[visit_Module, parameter[self, node]]:
constant[Turn globals assignment to functionDef and visit function defs. ]
variable[module_body] assign[=] call[name[list], parameter[]]
variable[symbols] assign[=] call[name[set], parameter[]]
for taget[name[stmt]] in starred[name[node].body] begin[:]
if call[name[isinstance], parameter[name[stmt], tuple[[<ast.Attribute object at 0x7da18dc99e40>, <ast.Attribute object at 0x7da18dc99030>]]]] begin[:]
for taget[name[alias]] in starred[name[stmt].names] begin[:]
variable[name] assign[=] <ast.BoolOp object at 0x7da18dc99cf0>
call[name[symbols].add, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da18dc98400> begin[:]
continue
for taget[name[target]] in starred[name[stmt].targets] begin[:]
if <ast.UnaryOp object at 0x7da18dc9b4f0> begin[:]
<ast.Raise object at 0x7da18dc99f30>
if compare[name[target].id in name[self].to_expand] begin[:]
<ast.Raise object at 0x7da18dc98070>
if call[name[isinstance], parameter[name[stmt].value, name[ast].Name]] begin[:]
if compare[name[stmt].value.id in name[symbols]] begin[:]
continue
call[name[self].to_expand.add, parameter[name[target].id]]
for taget[name[stmt]] in starred[name[node].body] begin[:]
if call[name[isinstance], parameter[name[stmt], name[ast].Assign]] begin[:]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da20c6c5ea0>]] begin[:]
call[name[module_body].append, parameter[name[stmt]]]
continue
name[self].local_decl assign[=] call[name[set], parameter[]]
variable[cst_value] assign[=] call[name[self].visit, parameter[name[stmt].value]]
for taget[name[target]] in starred[name[stmt].targets] begin[:]
assert[call[name[isinstance], parameter[name[target], name[ast].Name]]]
call[name[module_body].append, parameter[call[name[ast].FunctionDef, parameter[name[target].id, call[name[ast].arguments, parameter[list[[]], constant[None], list[[]], list[[]], constant[None], list[[]]]], list[[<ast.Call object at 0x7da20c6c6f80>]], list[[]], constant[None]]]]]
call[name[metadata].add, parameter[call[call[name[module_body]][<ast.UnaryOp object at 0x7da20c6c64d0>].body][constant[0]], call[name[metadata].StaticReturn, parameter[]]]]
<ast.AugAssign object at 0x7da20c6c5a20>
name[node].body assign[=] name[module_body]
return[name[node]] | keyword[def] identifier[visit_Module] ( identifier[self] , identifier[node] ):
literal[string]
identifier[module_body] = identifier[list] ()
identifier[symbols] = identifier[set] ()
keyword[for] identifier[stmt] keyword[in] identifier[node] . identifier[body] :
keyword[if] identifier[isinstance] ( identifier[stmt] ,( identifier[ast] . identifier[Import] , identifier[ast] . identifier[ImportFrom] )):
keyword[for] identifier[alias] keyword[in] identifier[stmt] . identifier[names] :
identifier[name] = identifier[alias] . identifier[asname] keyword[or] identifier[alias] . identifier[name]
identifier[symbols] . identifier[add] ( identifier[name] )
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[ast] . identifier[FunctionDef] ):
keyword[if] identifier[stmt] . identifier[name] keyword[in] identifier[symbols] :
keyword[raise] identifier[PythranSyntaxError] (
literal[string] % identifier[stmt] . identifier[name] ,
identifier[stmt] )
keyword[else] :
identifier[symbols] . identifier[add] ( identifier[stmt] . identifier[name] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[stmt] , identifier[ast] . identifier[Assign] ):
keyword[continue]
keyword[for] identifier[target] keyword[in] identifier[stmt] . identifier[targets] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[target] , identifier[ast] . identifier[Name] ):
keyword[raise] identifier[PythranSyntaxError] (
literal[string] ,
identifier[target] )
keyword[if] identifier[target] . identifier[id] keyword[in] identifier[self] . identifier[to_expand] :
keyword[raise] identifier[PythranSyntaxError] (
literal[string] % identifier[target] . identifier[id] ,
identifier[target] )
keyword[if] identifier[isinstance] ( identifier[stmt] . identifier[value] , identifier[ast] . identifier[Name] ):
keyword[if] identifier[stmt] . identifier[value] . identifier[id] keyword[in] identifier[symbols] :
keyword[continue]
identifier[self] . identifier[to_expand] . identifier[add] ( identifier[target] . identifier[id] )
keyword[for] identifier[stmt] keyword[in] identifier[node] . identifier[body] :
keyword[if] identifier[isinstance] ( identifier[stmt] , identifier[ast] . identifier[Assign] ):
keyword[if] identifier[all] ( identifier[isinstance] ( identifier[t] , identifier[ast] . identifier[Name] ) keyword[and] identifier[t] . identifier[id] keyword[not] keyword[in] identifier[self] . identifier[to_expand]
keyword[for] identifier[t] keyword[in] identifier[stmt] . identifier[targets] ):
identifier[module_body] . identifier[append] ( identifier[stmt] )
keyword[continue]
identifier[self] . identifier[local_decl] = identifier[set] ()
identifier[cst_value] = identifier[self] . identifier[visit] ( identifier[stmt] . identifier[value] )
keyword[for] identifier[target] keyword[in] identifier[stmt] . identifier[targets] :
keyword[assert] identifier[isinstance] ( identifier[target] , identifier[ast] . identifier[Name] )
identifier[module_body] . identifier[append] (
identifier[ast] . identifier[FunctionDef] ( identifier[target] . identifier[id] ,
identifier[ast] . identifier[arguments] ([], keyword[None] ,
[],[], keyword[None] ,[]),
[ identifier[ast] . identifier[Return] ( identifier[value] = identifier[cst_value] )],
[], keyword[None] ))
identifier[metadata] . identifier[add] ( identifier[module_body] [- literal[int] ]. identifier[body] [ literal[int] ],
identifier[metadata] . identifier[StaticReturn] ())
keyword[else] :
identifier[self] . identifier[local_decl] = identifier[self] . identifier[gather] (
identifier[LocalNameDeclarations] , identifier[stmt] )
identifier[module_body] . identifier[append] ( identifier[self] . identifier[visit] ( identifier[stmt] ))
identifier[self] . identifier[update] |= identifier[bool] ( identifier[self] . identifier[to_expand] )
identifier[node] . identifier[body] = identifier[module_body]
keyword[return] identifier[node] | def visit_Module(self, node):
"""Turn globals assignment to functionDef and visit function defs. """
module_body = list()
symbols = set()
# Gather top level assigned variables.
for stmt in node.body:
if isinstance(stmt, (ast.Import, ast.ImportFrom)):
for alias in stmt.names:
name = alias.asname or alias.name
symbols.add(name) # no warning here # depends on [control=['for'], data=['alias']] # depends on [control=['if'], data=[]]
elif isinstance(stmt, ast.FunctionDef):
if stmt.name in symbols:
raise PythranSyntaxError('Multiple top-level definition of %s.' % stmt.name, stmt) # depends on [control=['if'], data=[]]
else:
symbols.add(stmt.name) # depends on [control=['if'], data=[]]
if not isinstance(stmt, ast.Assign):
continue # depends on [control=['if'], data=[]]
for target in stmt.targets:
if not isinstance(target, ast.Name):
raise PythranSyntaxError('Top-level assignment to an expression.', target) # depends on [control=['if'], data=[]]
if target.id in self.to_expand:
raise PythranSyntaxError('Multiple top-level definition of %s.' % target.id, target) # depends on [control=['if'], data=[]]
if isinstance(stmt.value, ast.Name):
if stmt.value.id in symbols:
continue # create aliasing between top level symbols # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.to_expand.add(target.id) # depends on [control=['for'], data=['target']] # depends on [control=['for'], data=['stmt']]
for stmt in node.body:
if isinstance(stmt, ast.Assign):
# that's not a global var, but a module/function aliasing
if all((isinstance(t, ast.Name) and t.id not in self.to_expand for t in stmt.targets)):
module_body.append(stmt)
continue # depends on [control=['if'], data=[]]
self.local_decl = set()
cst_value = self.visit(stmt.value)
for target in stmt.targets:
assert isinstance(target, ast.Name)
module_body.append(ast.FunctionDef(target.id, ast.arguments([], None, [], [], None, []), [ast.Return(value=cst_value)], [], None))
metadata.add(module_body[-1].body[0], metadata.StaticReturn()) # depends on [control=['for'], data=['target']] # depends on [control=['if'], data=[]]
else:
self.local_decl = self.gather(LocalNameDeclarations, stmt)
module_body.append(self.visit(stmt)) # depends on [control=['for'], data=['stmt']]
self.update |= bool(self.to_expand)
node.body = module_body
return node |
def list_rules(self, topic_name, subscription_name):
'''
Retrieves the rules that exist under the specified subscription.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/rules/'
request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _ETreeXmlToObject.convert_response_to_feeds(
response, _convert_etree_element_to_rule) | def function[list_rules, parameter[self, topic_name, subscription_name]]:
constant[
Retrieves the rules that exist under the specified subscription.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
]
call[name[_validate_not_none], parameter[constant[topic_name], name[topic_name]]]
call[name[_validate_not_none], parameter[constant[subscription_name], name[subscription_name]]]
variable[request] assign[=] call[name[HTTPRequest], parameter[]]
name[request].method assign[=] constant[GET]
name[request].host assign[=] call[name[self]._get_host, parameter[]]
name[request].path assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[/] + call[name[_str], parameter[name[topic_name]]]] + constant[/subscriptions/]] + call[name[_str], parameter[name[subscription_name]]]] + constant[/rules/]]
<ast.Tuple object at 0x7da1b03732b0> assign[=] call[name[self]._httpclient._update_request_uri_query, parameter[name[request]]]
name[request].headers assign[=] call[name[self]._update_service_bus_header, parameter[name[request]]]
variable[response] assign[=] call[name[self]._perform_request, parameter[name[request]]]
return[call[name[_ETreeXmlToObject].convert_response_to_feeds, parameter[name[response], name[_convert_etree_element_to_rule]]]] | keyword[def] identifier[list_rules] ( identifier[self] , identifier[topic_name] , identifier[subscription_name] ):
literal[string]
identifier[_validate_not_none] ( literal[string] , identifier[topic_name] )
identifier[_validate_not_none] ( literal[string] , identifier[subscription_name] )
identifier[request] = identifier[HTTPRequest] ()
identifier[request] . identifier[method] = literal[string]
identifier[request] . identifier[host] = identifier[self] . identifier[_get_host] ()
identifier[request] . identifier[path] = literal[string] + identifier[_str] ( identifier[topic_name] )+ literal[string] + identifier[_str] ( identifier[subscription_name] )+ literal[string]
identifier[request] . identifier[path] , identifier[request] . identifier[query] = identifier[self] . identifier[_httpclient] . identifier[_update_request_uri_query] ( identifier[request] )
identifier[request] . identifier[headers] = identifier[self] . identifier[_update_service_bus_header] ( identifier[request] )
identifier[response] = identifier[self] . identifier[_perform_request] ( identifier[request] )
keyword[return] identifier[_ETreeXmlToObject] . identifier[convert_response_to_feeds] (
identifier[response] , identifier[_convert_etree_element_to_rule] ) | def list_rules(self, topic_name, subscription_name):
"""
Retrieves the rules that exist under the specified subscription.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
"""
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + _str(subscription_name) + '/rules/'
(request.path, request.query) = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _ETreeXmlToObject.convert_response_to_feeds(response, _convert_etree_element_to_rule) |
def __initialize_instance(self):
"""
Take any predefined methods/handlers and insert them into Sanic JWT
"""
config = self.config
# Initialize instance of the Authentication class
self.instance.auth = self.authentication_class(self.app, config=config)
init_handlers = (
handlers if config.auth_mode() else auth_mode_agnostic_handlers
)
for handler in init_handlers:
if handler.keys is None:
self.__check_method_in_auth(handler.name, handler.exception)
else:
if all(map(config.get, handler.keys)):
self.__check_method_in_auth(
handler.name, handler.exception
)
for handler in init_handlers:
if handler.name in self.kwargs:
method = self.kwargs.pop(handler.name)
setattr(self.instance.auth, handler.name, method) | def function[__initialize_instance, parameter[self]]:
constant[
Take any predefined methods/handlers and insert them into Sanic JWT
]
variable[config] assign[=] name[self].config
name[self].instance.auth assign[=] call[name[self].authentication_class, parameter[name[self].app]]
variable[init_handlers] assign[=] <ast.IfExp object at 0x7da18bcc86d0>
for taget[name[handler]] in starred[name[init_handlers]] begin[:]
if compare[name[handler].keys is constant[None]] begin[:]
call[name[self].__check_method_in_auth, parameter[name[handler].name, name[handler].exception]]
for taget[name[handler]] in starred[name[init_handlers]] begin[:]
if compare[name[handler].name in name[self].kwargs] begin[:]
variable[method] assign[=] call[name[self].kwargs.pop, parameter[name[handler].name]]
call[name[setattr], parameter[name[self].instance.auth, name[handler].name, name[method]]] | keyword[def] identifier[__initialize_instance] ( identifier[self] ):
literal[string]
identifier[config] = identifier[self] . identifier[config]
identifier[self] . identifier[instance] . identifier[auth] = identifier[self] . identifier[authentication_class] ( identifier[self] . identifier[app] , identifier[config] = identifier[config] )
identifier[init_handlers] =(
identifier[handlers] keyword[if] identifier[config] . identifier[auth_mode] () keyword[else] identifier[auth_mode_agnostic_handlers]
)
keyword[for] identifier[handler] keyword[in] identifier[init_handlers] :
keyword[if] identifier[handler] . identifier[keys] keyword[is] keyword[None] :
identifier[self] . identifier[__check_method_in_auth] ( identifier[handler] . identifier[name] , identifier[handler] . identifier[exception] )
keyword[else] :
keyword[if] identifier[all] ( identifier[map] ( identifier[config] . identifier[get] , identifier[handler] . identifier[keys] )):
identifier[self] . identifier[__check_method_in_auth] (
identifier[handler] . identifier[name] , identifier[handler] . identifier[exception]
)
keyword[for] identifier[handler] keyword[in] identifier[init_handlers] :
keyword[if] identifier[handler] . identifier[name] keyword[in] identifier[self] . identifier[kwargs] :
identifier[method] = identifier[self] . identifier[kwargs] . identifier[pop] ( identifier[handler] . identifier[name] )
identifier[setattr] ( identifier[self] . identifier[instance] . identifier[auth] , identifier[handler] . identifier[name] , identifier[method] ) | def __initialize_instance(self):
"""
Take any predefined methods/handlers and insert them into Sanic JWT
"""
config = self.config
# Initialize instance of the Authentication class
self.instance.auth = self.authentication_class(self.app, config=config)
init_handlers = handlers if config.auth_mode() else auth_mode_agnostic_handlers
for handler in init_handlers:
if handler.keys is None:
self.__check_method_in_auth(handler.name, handler.exception) # depends on [control=['if'], data=[]]
elif all(map(config.get, handler.keys)):
self.__check_method_in_auth(handler.name, handler.exception) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['handler']]
for handler in init_handlers:
if handler.name in self.kwargs:
method = self.kwargs.pop(handler.name)
setattr(self.instance.auth, handler.name, method) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['handler']] |
def check_known_schemas(self):
"""\
checks to see if we were able to find the image via known schemas:
Supported Schemas
- Open Graph
- schema.org
"""
if 'image' in self.article.opengraph:
return self.get_image(self.article.opengraph["image"],
extraction_type='opengraph')
elif (self.article.schema and 'image' in self.article.schema and
"url" in self.article.schema["image"]):
return self.get_image(self.article.schema["image"]["url"],
extraction_type='schema.org')
return None | def function[check_known_schemas, parameter[self]]:
constant[ checks to see if we were able to find the image via known schemas:
Supported Schemas
- Open Graph
- schema.org
]
if compare[constant[image] in name[self].article.opengraph] begin[:]
return[call[name[self].get_image, parameter[call[name[self].article.opengraph][constant[image]]]]]
return[constant[None]] | keyword[def] identifier[check_known_schemas] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[article] . identifier[opengraph] :
keyword[return] identifier[self] . identifier[get_image] ( identifier[self] . identifier[article] . identifier[opengraph] [ literal[string] ],
identifier[extraction_type] = literal[string] )
keyword[elif] ( identifier[self] . identifier[article] . identifier[schema] keyword[and] literal[string] keyword[in] identifier[self] . identifier[article] . identifier[schema] keyword[and]
literal[string] keyword[in] identifier[self] . identifier[article] . identifier[schema] [ literal[string] ]):
keyword[return] identifier[self] . identifier[get_image] ( identifier[self] . identifier[article] . identifier[schema] [ literal[string] ][ literal[string] ],
identifier[extraction_type] = literal[string] )
keyword[return] keyword[None] | def check_known_schemas(self):
""" checks to see if we were able to find the image via known schemas:
Supported Schemas
- Open Graph
- schema.org
"""
if 'image' in self.article.opengraph:
return self.get_image(self.article.opengraph['image'], extraction_type='opengraph') # depends on [control=['if'], data=[]]
elif self.article.schema and 'image' in self.article.schema and ('url' in self.article.schema['image']):
return self.get_image(self.article.schema['image']['url'], extraction_type='schema.org') # depends on [control=['if'], data=[]]
return None |
def separate_comma_imports(partitions):
"""Turns `import a, b` into `import a` and `import b`"""
def _inner():
for partition in partitions:
if partition.code_type is CodeType.IMPORT:
import_obj = import_obj_from_str(partition.src)
if import_obj.has_multiple_imports:
for new_import_obj in import_obj.split_imports():
yield CodePartition(
CodeType.IMPORT, new_import_obj.to_text(),
)
else:
yield partition
else:
yield partition
return list(_inner()) | def function[separate_comma_imports, parameter[partitions]]:
constant[Turns `import a, b` into `import a` and `import b`]
def function[_inner, parameter[]]:
for taget[name[partition]] in starred[name[partitions]] begin[:]
if compare[name[partition].code_type is name[CodeType].IMPORT] begin[:]
variable[import_obj] assign[=] call[name[import_obj_from_str], parameter[name[partition].src]]
if name[import_obj].has_multiple_imports begin[:]
for taget[name[new_import_obj]] in starred[call[name[import_obj].split_imports, parameter[]]] begin[:]
<ast.Yield object at 0x7da20e954f40>
return[call[name[list], parameter[call[name[_inner], parameter[]]]]] | keyword[def] identifier[separate_comma_imports] ( identifier[partitions] ):
literal[string]
keyword[def] identifier[_inner] ():
keyword[for] identifier[partition] keyword[in] identifier[partitions] :
keyword[if] identifier[partition] . identifier[code_type] keyword[is] identifier[CodeType] . identifier[IMPORT] :
identifier[import_obj] = identifier[import_obj_from_str] ( identifier[partition] . identifier[src] )
keyword[if] identifier[import_obj] . identifier[has_multiple_imports] :
keyword[for] identifier[new_import_obj] keyword[in] identifier[import_obj] . identifier[split_imports] ():
keyword[yield] identifier[CodePartition] (
identifier[CodeType] . identifier[IMPORT] , identifier[new_import_obj] . identifier[to_text] (),
)
keyword[else] :
keyword[yield] identifier[partition]
keyword[else] :
keyword[yield] identifier[partition]
keyword[return] identifier[list] ( identifier[_inner] ()) | def separate_comma_imports(partitions):
"""Turns `import a, b` into `import a` and `import b`"""
def _inner():
for partition in partitions:
if partition.code_type is CodeType.IMPORT:
import_obj = import_obj_from_str(partition.src)
if import_obj.has_multiple_imports:
for new_import_obj in import_obj.split_imports():
yield CodePartition(CodeType.IMPORT, new_import_obj.to_text()) # depends on [control=['for'], data=['new_import_obj']] # depends on [control=['if'], data=[]]
else:
yield partition # depends on [control=['if'], data=[]]
else:
yield partition # depends on [control=['for'], data=['partition']]
return list(_inner()) |
def set_height(self, height):
""" Set Screen Height """
if height > 0 and height <= self.server.server_info.get("screen_height"):
self.height = height
self.server.request("screen_set %s hgt %i" % (self.ref, self.height)) | def function[set_height, parameter[self, height]]:
constant[ Set Screen Height ]
if <ast.BoolOp object at 0x7da1b0ea19c0> begin[:]
name[self].height assign[=] name[height]
call[name[self].server.request, parameter[binary_operation[constant[screen_set %s hgt %i] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0ea0790>, <ast.Attribute object at 0x7da1b0ea0730>]]]]] | keyword[def] identifier[set_height] ( identifier[self] , identifier[height] ):
literal[string]
keyword[if] identifier[height] > literal[int] keyword[and] identifier[height] <= identifier[self] . identifier[server] . identifier[server_info] . identifier[get] ( literal[string] ):
identifier[self] . identifier[height] = identifier[height]
identifier[self] . identifier[server] . identifier[request] ( literal[string] %( identifier[self] . identifier[ref] , identifier[self] . identifier[height] )) | def set_height(self, height):
""" Set Screen Height """
if height > 0 and height <= self.server.server_info.get('screen_height'):
self.height = height
self.server.request('screen_set %s hgt %i' % (self.ref, self.height)) # depends on [control=['if'], data=[]] |
def save_book(self, f_buf=None):
"""
Return a file buffer containing the resulting xls
:param obj f_buf: A file buffer supporting the write and seek
methods
"""
if f_buf is None:
f_buf = StringIO.StringIO()
f_buf.write(openpyxl.writer.excel.save_virtual_workbook(self.book))
f_buf.seek(0)
return f_buf | def function[save_book, parameter[self, f_buf]]:
constant[
Return a file buffer containing the resulting xls
:param obj f_buf: A file buffer supporting the write and seek
methods
]
if compare[name[f_buf] is constant[None]] begin[:]
variable[f_buf] assign[=] call[name[StringIO].StringIO, parameter[]]
call[name[f_buf].write, parameter[call[name[openpyxl].writer.excel.save_virtual_workbook, parameter[name[self].book]]]]
call[name[f_buf].seek, parameter[constant[0]]]
return[name[f_buf]] | keyword[def] identifier[save_book] ( identifier[self] , identifier[f_buf] = keyword[None] ):
literal[string]
keyword[if] identifier[f_buf] keyword[is] keyword[None] :
identifier[f_buf] = identifier[StringIO] . identifier[StringIO] ()
identifier[f_buf] . identifier[write] ( identifier[openpyxl] . identifier[writer] . identifier[excel] . identifier[save_virtual_workbook] ( identifier[self] . identifier[book] ))
identifier[f_buf] . identifier[seek] ( literal[int] )
keyword[return] identifier[f_buf] | def save_book(self, f_buf=None):
"""
Return a file buffer containing the resulting xls
:param obj f_buf: A file buffer supporting the write and seek
methods
"""
if f_buf is None:
f_buf = StringIO.StringIO() # depends on [control=['if'], data=['f_buf']]
f_buf.write(openpyxl.writer.excel.save_virtual_workbook(self.book))
f_buf.seek(0)
return f_buf |
def merge_dicts(base, changes):
"""Merge b into a recursively, without overwriting values.
:param base: the dict that will be altered.
:param changes: changes to update base.
"""
for k, v in changes.items():
if isinstance(v, dict):
merge_dicts(base.setdefault(k, {}), v)
else:
base.setdefault(k, v) | def function[merge_dicts, parameter[base, changes]]:
constant[Merge b into a recursively, without overwriting values.
:param base: the dict that will be altered.
:param changes: changes to update base.
]
for taget[tuple[[<ast.Name object at 0x7da1afe67340>, <ast.Name object at 0x7da1afe675b0>]]] in starred[call[name[changes].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[dict]]] begin[:]
call[name[merge_dicts], parameter[call[name[base].setdefault, parameter[name[k], dictionary[[], []]]], name[v]]] | keyword[def] identifier[merge_dicts] ( identifier[base] , identifier[changes] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[changes] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[dict] ):
identifier[merge_dicts] ( identifier[base] . identifier[setdefault] ( identifier[k] ,{}), identifier[v] )
keyword[else] :
identifier[base] . identifier[setdefault] ( identifier[k] , identifier[v] ) | def merge_dicts(base, changes):
"""Merge b into a recursively, without overwriting values.
:param base: the dict that will be altered.
:param changes: changes to update base.
"""
for (k, v) in changes.items():
if isinstance(v, dict):
merge_dicts(base.setdefault(k, {}), v) # depends on [control=['if'], data=[]]
else:
base.setdefault(k, v) # depends on [control=['for'], data=[]] |
def send_signal(self, backend, signal):
"""
Sends the `signal` signal to `backend`. Raises ValueError if `backend`
is not registered with the client. Returns the result.
"""
backend = self._expand_host(backend)
if backend in self.backends:
try:
return self._work(backend, self._package(signal), log=False)
except socket.error:
raise BackendNotAvailableError
else:
raise ValueError('No such backend!') | def function[send_signal, parameter[self, backend, signal]]:
constant[
Sends the `signal` signal to `backend`. Raises ValueError if `backend`
is not registered with the client. Returns the result.
]
variable[backend] assign[=] call[name[self]._expand_host, parameter[name[backend]]]
if compare[name[backend] in name[self].backends] begin[:]
<ast.Try object at 0x7da1b1351a80> | keyword[def] identifier[send_signal] ( identifier[self] , identifier[backend] , identifier[signal] ):
literal[string]
identifier[backend] = identifier[self] . identifier[_expand_host] ( identifier[backend] )
keyword[if] identifier[backend] keyword[in] identifier[self] . identifier[backends] :
keyword[try] :
keyword[return] identifier[self] . identifier[_work] ( identifier[backend] , identifier[self] . identifier[_package] ( identifier[signal] ), identifier[log] = keyword[False] )
keyword[except] identifier[socket] . identifier[error] :
keyword[raise] identifier[BackendNotAvailableError]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def send_signal(self, backend, signal):
"""
Sends the `signal` signal to `backend`. Raises ValueError if `backend`
is not registered with the client. Returns the result.
"""
backend = self._expand_host(backend)
if backend in self.backends:
try:
return self._work(backend, self._package(signal), log=False) # depends on [control=['try'], data=[]]
except socket.error:
raise BackendNotAvailableError # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['backend']]
else:
raise ValueError('No such backend!') |
def domain_delete(domain, logger, filesystem):
"""libvirt domain undefinition.
@raise: libvirt.libvirtError.
"""
if domain is not None:
try:
if domain.isActive():
domain.destroy()
except libvirt.libvirtError:
logger.exception("Unable to destroy the domain.")
try:
domain.undefine()
except libvirt.libvirtError:
logger.exception("Unable to undefine the domain.")
try:
if filesystem is not None and os.path.exists(filesystem):
shutil.rmtree(filesystem)
except Exception:
logger.exception("Unable to remove the shared folder.") | def function[domain_delete, parameter[domain, logger, filesystem]]:
constant[libvirt domain undefinition.
@raise: libvirt.libvirtError.
]
if compare[name[domain] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b26af220>
<ast.Try object at 0x7da1b26aefe0>
<ast.Try object at 0x7da1b26acbb0> | keyword[def] identifier[domain_delete] ( identifier[domain] , identifier[logger] , identifier[filesystem] ):
literal[string]
keyword[if] identifier[domain] keyword[is] keyword[not] keyword[None] :
keyword[try] :
keyword[if] identifier[domain] . identifier[isActive] ():
identifier[domain] . identifier[destroy] ()
keyword[except] identifier[libvirt] . identifier[libvirtError] :
identifier[logger] . identifier[exception] ( literal[string] )
keyword[try] :
identifier[domain] . identifier[undefine] ()
keyword[except] identifier[libvirt] . identifier[libvirtError] :
identifier[logger] . identifier[exception] ( literal[string] )
keyword[try] :
keyword[if] identifier[filesystem] keyword[is] keyword[not] keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[filesystem] ):
identifier[shutil] . identifier[rmtree] ( identifier[filesystem] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] ) | def domain_delete(domain, logger, filesystem):
"""libvirt domain undefinition.
@raise: libvirt.libvirtError.
"""
if domain is not None:
try:
if domain.isActive():
domain.destroy() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except libvirt.libvirtError:
logger.exception('Unable to destroy the domain.') # depends on [control=['except'], data=[]]
try:
domain.undefine() # depends on [control=['try'], data=[]]
except libvirt.libvirtError:
logger.exception('Unable to undefine the domain.') # depends on [control=['except'], data=[]]
try:
if filesystem is not None and os.path.exists(filesystem):
shutil.rmtree(filesystem) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
logger.exception('Unable to remove the shared folder.') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['domain']] |
def parse_headers(obj):
"""Parse a string a iterable object (including file like objects) to a
python dictionary.
Args:
obj: An iterable object including file-like objects.
Returns:
An dictionary of headers. If a header is repeated then the last value
for that header is given.
Raises:
ValueError: If the first line is a continuation line or the headers
cannot be parsed.
"""
if isinstance(obj, basestring):
obj = cStringIO.StringIO(obj)
hdrs = []
for line in obj:
hdr = parse_header(line)
if not hdr:
break
if isinstance(hdr, basestring):
if not hdrs:
raise ValueError("First header is a continuation")
hdrs[-1] = (hdrs[-1][0], hdrs[-1][1] + hdr)
continue
hdrs.append(hdr)
return iodict.IODict(hdrs) | def function[parse_headers, parameter[obj]]:
constant[Parse a string a iterable object (including file like objects) to a
python dictionary.
Args:
obj: An iterable object including file-like objects.
Returns:
An dictionary of headers. If a header is repeated then the last value
for that header is given.
Raises:
ValueError: If the first line is a continuation line or the headers
cannot be parsed.
]
if call[name[isinstance], parameter[name[obj], name[basestring]]] begin[:]
variable[obj] assign[=] call[name[cStringIO].StringIO, parameter[name[obj]]]
variable[hdrs] assign[=] list[[]]
for taget[name[line]] in starred[name[obj]] begin[:]
variable[hdr] assign[=] call[name[parse_header], parameter[name[line]]]
if <ast.UnaryOp object at 0x7da1b0145ed0> begin[:]
break
if call[name[isinstance], parameter[name[hdr], name[basestring]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0145f90> begin[:]
<ast.Raise object at 0x7da1b0146f20>
call[name[hdrs]][<ast.UnaryOp object at 0x7da1b0145cc0>] assign[=] tuple[[<ast.Subscript object at 0x7da1b0146080>, <ast.BinOp object at 0x7da1b0145b70>]]
continue
call[name[hdrs].append, parameter[name[hdr]]]
return[call[name[iodict].IODict, parameter[name[hdrs]]]] | keyword[def] identifier[parse_headers] ( identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[basestring] ):
identifier[obj] = identifier[cStringIO] . identifier[StringIO] ( identifier[obj] )
identifier[hdrs] =[]
keyword[for] identifier[line] keyword[in] identifier[obj] :
identifier[hdr] = identifier[parse_header] ( identifier[line] )
keyword[if] keyword[not] identifier[hdr] :
keyword[break]
keyword[if] identifier[isinstance] ( identifier[hdr] , identifier[basestring] ):
keyword[if] keyword[not] identifier[hdrs] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[hdrs] [- literal[int] ]=( identifier[hdrs] [- literal[int] ][ literal[int] ], identifier[hdrs] [- literal[int] ][ literal[int] ]+ identifier[hdr] )
keyword[continue]
identifier[hdrs] . identifier[append] ( identifier[hdr] )
keyword[return] identifier[iodict] . identifier[IODict] ( identifier[hdrs] ) | def parse_headers(obj):
"""Parse a string a iterable object (including file like objects) to a
python dictionary.
Args:
obj: An iterable object including file-like objects.
Returns:
An dictionary of headers. If a header is repeated then the last value
for that header is given.
Raises:
ValueError: If the first line is a continuation line or the headers
cannot be parsed.
"""
if isinstance(obj, basestring):
obj = cStringIO.StringIO(obj) # depends on [control=['if'], data=[]]
hdrs = []
for line in obj:
hdr = parse_header(line)
if not hdr:
break # depends on [control=['if'], data=[]]
if isinstance(hdr, basestring):
if not hdrs:
raise ValueError('First header is a continuation') # depends on [control=['if'], data=[]]
hdrs[-1] = (hdrs[-1][0], hdrs[-1][1] + hdr)
continue # depends on [control=['if'], data=[]]
hdrs.append(hdr) # depends on [control=['for'], data=['line']]
return iodict.IODict(hdrs) |
def is_compatible_space(space, base_space):
"""Check compatibility of a (power) space with a base space.
Compatibility here means that the spaces are equal or ``space``
is a non-empty power space of ``base_space`` up to different
data types.
Parameters
----------
space, base_space : `LinearSpace`
Spaces to check for compatibility. ``base_space`` cannot be a
`ProductSpace`.
Returns
-------
is_compatible : bool
``True`` if
- ``space == base_space`` or
- ``space.astype(base_space.dtype) == base_space``, provided that
these properties exist, or
- ``space`` is a power space of nonzero length and one of the three
situations applies to ``space[0]`` (recursively).
Otherwise ``False``.
Examples
--------
Scalar spaces:
>>> base = odl.rn(2)
>>> is_compatible_space(odl.rn(2), base)
True
>>> is_compatible_space(odl.rn(3), base)
False
>>> is_compatible_space(odl.rn(2, dtype='float32'), base)
True
Power spaces:
>>> is_compatible_space(odl.rn(2) ** 2, base)
True
>>> is_compatible_space(odl.rn(2) * odl.rn(3), base) # no power space
False
>>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
True
"""
if isinstance(base_space, ProductSpace):
return False
if isinstance(space, ProductSpace):
if not space.is_power_space:
return False
elif len(space) == 0:
return False
else:
return is_compatible_space(space[0], base_space)
else:
if hasattr(space, 'astype') and hasattr(base_space, 'dtype'):
# TODO: maybe only the shape should play a role?
comp_space = space.astype(base_space.dtype)
else:
comp_space = space
return comp_space == base_space | def function[is_compatible_space, parameter[space, base_space]]:
constant[Check compatibility of a (power) space with a base space.
Compatibility here means that the spaces are equal or ``space``
is a non-empty power space of ``base_space`` up to different
data types.
Parameters
----------
space, base_space : `LinearSpace`
Spaces to check for compatibility. ``base_space`` cannot be a
`ProductSpace`.
Returns
-------
is_compatible : bool
``True`` if
- ``space == base_space`` or
- ``space.astype(base_space.dtype) == base_space``, provided that
these properties exist, or
- ``space`` is a power space of nonzero length and one of the three
situations applies to ``space[0]`` (recursively).
Otherwise ``False``.
Examples
--------
Scalar spaces:
>>> base = odl.rn(2)
>>> is_compatible_space(odl.rn(2), base)
True
>>> is_compatible_space(odl.rn(3), base)
False
>>> is_compatible_space(odl.rn(2, dtype='float32'), base)
True
Power spaces:
>>> is_compatible_space(odl.rn(2) ** 2, base)
True
>>> is_compatible_space(odl.rn(2) * odl.rn(3), base) # no power space
False
>>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
True
]
if call[name[isinstance], parameter[name[base_space], name[ProductSpace]]] begin[:]
return[constant[False]]
if call[name[isinstance], parameter[name[space], name[ProductSpace]]] begin[:]
if <ast.UnaryOp object at 0x7da1b20b5d50> begin[:]
return[constant[False]] | keyword[def] identifier[is_compatible_space] ( identifier[space] , identifier[base_space] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[base_space] , identifier[ProductSpace] ):
keyword[return] keyword[False]
keyword[if] identifier[isinstance] ( identifier[space] , identifier[ProductSpace] ):
keyword[if] keyword[not] identifier[space] . identifier[is_power_space] :
keyword[return] keyword[False]
keyword[elif] identifier[len] ( identifier[space] )== literal[int] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[is_compatible_space] ( identifier[space] [ literal[int] ], identifier[base_space] )
keyword[else] :
keyword[if] identifier[hasattr] ( identifier[space] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[base_space] , literal[string] ):
identifier[comp_space] = identifier[space] . identifier[astype] ( identifier[base_space] . identifier[dtype] )
keyword[else] :
identifier[comp_space] = identifier[space]
keyword[return] identifier[comp_space] == identifier[base_space] | def is_compatible_space(space, base_space):
"""Check compatibility of a (power) space with a base space.
Compatibility here means that the spaces are equal or ``space``
is a non-empty power space of ``base_space`` up to different
data types.
Parameters
----------
space, base_space : `LinearSpace`
Spaces to check for compatibility. ``base_space`` cannot be a
`ProductSpace`.
Returns
-------
is_compatible : bool
``True`` if
- ``space == base_space`` or
- ``space.astype(base_space.dtype) == base_space``, provided that
these properties exist, or
- ``space`` is a power space of nonzero length and one of the three
situations applies to ``space[0]`` (recursively).
Otherwise ``False``.
Examples
--------
Scalar spaces:
>>> base = odl.rn(2)
>>> is_compatible_space(odl.rn(2), base)
True
>>> is_compatible_space(odl.rn(3), base)
False
>>> is_compatible_space(odl.rn(2, dtype='float32'), base)
True
Power spaces:
>>> is_compatible_space(odl.rn(2) ** 2, base)
True
>>> is_compatible_space(odl.rn(2) * odl.rn(3), base) # no power space
False
>>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
True
"""
if isinstance(base_space, ProductSpace):
return False # depends on [control=['if'], data=[]]
if isinstance(space, ProductSpace):
if not space.is_power_space:
return False # depends on [control=['if'], data=[]]
elif len(space) == 0:
return False # depends on [control=['if'], data=[]]
else:
return is_compatible_space(space[0], base_space) # depends on [control=['if'], data=[]]
else:
if hasattr(space, 'astype') and hasattr(base_space, 'dtype'):
# TODO: maybe only the shape should play a role?
comp_space = space.astype(base_space.dtype) # depends on [control=['if'], data=[]]
else:
comp_space = space
return comp_space == base_space |
def check_freshness(self, ts):
'''
Get all ledger IDs for which
A) not updated for more than Freshness Timeout
B) hasn't been attempted to update (returned from this method) for more than Freshness Timeout
Should be called whenever we need to decide if ledgers need to be updated.
:param ts: the current time check the freshness against
:return: an ordered dict of outdated ledgers sorted by the time from the last update (from oldest to newest)
and then by ledger ID (in case of equal update time)
'''
outdated_ledgers = {}
for ledger_id, freshness_state in self._ledger_freshness.items():
if ts - freshness_state.last_updated <= self.freshness_timeout:
continue
if ts - freshness_state.last_marked_as_outdated <= self.freshness_timeout:
continue
outdated_ledgers[ledger_id] = ts - freshness_state.last_updated
freshness_state.last_marked_as_outdated = ts
# sort by last update time and then by ledger_id
return OrderedDict(
sorted(
outdated_ledgers.items(),
key=lambda item: (-item[1], item[0])
)
) | def function[check_freshness, parameter[self, ts]]:
constant[
Get all ledger IDs for which
A) not updated for more than Freshness Timeout
B) hasn't been attempted to update (returned from this method) for more than Freshness Timeout
Should be called whenever we need to decide if ledgers need to be updated.
:param ts: the current time check the freshness against
:return: an ordered dict of outdated ledgers sorted by the time from the last update (from oldest to newest)
and then by ledger ID (in case of equal update time)
]
variable[outdated_ledgers] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18dc9a920>, <ast.Name object at 0x7da18dc981c0>]]] in starred[call[name[self]._ledger_freshness.items, parameter[]]] begin[:]
if compare[binary_operation[name[ts] - name[freshness_state].last_updated] less_or_equal[<=] name[self].freshness_timeout] begin[:]
continue
if compare[binary_operation[name[ts] - name[freshness_state].last_marked_as_outdated] less_or_equal[<=] name[self].freshness_timeout] begin[:]
continue
call[name[outdated_ledgers]][name[ledger_id]] assign[=] binary_operation[name[ts] - name[freshness_state].last_updated]
name[freshness_state].last_marked_as_outdated assign[=] name[ts]
return[call[name[OrderedDict], parameter[call[name[sorted], parameter[call[name[outdated_ledgers].items, parameter[]]]]]]] | keyword[def] identifier[check_freshness] ( identifier[self] , identifier[ts] ):
literal[string]
identifier[outdated_ledgers] ={}
keyword[for] identifier[ledger_id] , identifier[freshness_state] keyword[in] identifier[self] . identifier[_ledger_freshness] . identifier[items] ():
keyword[if] identifier[ts] - identifier[freshness_state] . identifier[last_updated] <= identifier[self] . identifier[freshness_timeout] :
keyword[continue]
keyword[if] identifier[ts] - identifier[freshness_state] . identifier[last_marked_as_outdated] <= identifier[self] . identifier[freshness_timeout] :
keyword[continue]
identifier[outdated_ledgers] [ identifier[ledger_id] ]= identifier[ts] - identifier[freshness_state] . identifier[last_updated]
identifier[freshness_state] . identifier[last_marked_as_outdated] = identifier[ts]
keyword[return] identifier[OrderedDict] (
identifier[sorted] (
identifier[outdated_ledgers] . identifier[items] (),
identifier[key] = keyword[lambda] identifier[item] :(- identifier[item] [ literal[int] ], identifier[item] [ literal[int] ])
)
) | def check_freshness(self, ts):
"""
Get all ledger IDs for which
A) not updated for more than Freshness Timeout
B) hasn't been attempted to update (returned from this method) for more than Freshness Timeout
Should be called whenever we need to decide if ledgers need to be updated.
:param ts: the current time check the freshness against
:return: an ordered dict of outdated ledgers sorted by the time from the last update (from oldest to newest)
and then by ledger ID (in case of equal update time)
"""
outdated_ledgers = {}
for (ledger_id, freshness_state) in self._ledger_freshness.items():
if ts - freshness_state.last_updated <= self.freshness_timeout:
continue # depends on [control=['if'], data=[]]
if ts - freshness_state.last_marked_as_outdated <= self.freshness_timeout:
continue # depends on [control=['if'], data=[]]
outdated_ledgers[ledger_id] = ts - freshness_state.last_updated
freshness_state.last_marked_as_outdated = ts # depends on [control=['for'], data=[]]
# sort by last update time and then by ledger_id
return OrderedDict(sorted(outdated_ledgers.items(), key=lambda item: (-item[1], item[0]))) |
def add_graph_to_namespace(self, graph):
"""
Adds the variables name to the namespace of the local LISP code
:param graph: the graph to add to the namespace
:return: None
"""
for node in graph.vs:
attributes = node.attributes()
self.namespace[node['name']] = attributes
for node in graph.es:
attributes = node.attributes()
self.namespace[node['name']] = attributes | def function[add_graph_to_namespace, parameter[self, graph]]:
constant[
Adds the variables name to the namespace of the local LISP code
:param graph: the graph to add to the namespace
:return: None
]
for taget[name[node]] in starred[name[graph].vs] begin[:]
variable[attributes] assign[=] call[name[node].attributes, parameter[]]
call[name[self].namespace][call[name[node]][constant[name]]] assign[=] name[attributes]
for taget[name[node]] in starred[name[graph].es] begin[:]
variable[attributes] assign[=] call[name[node].attributes, parameter[]]
call[name[self].namespace][call[name[node]][constant[name]]] assign[=] name[attributes] | keyword[def] identifier[add_graph_to_namespace] ( identifier[self] , identifier[graph] ):
literal[string]
keyword[for] identifier[node] keyword[in] identifier[graph] . identifier[vs] :
identifier[attributes] = identifier[node] . identifier[attributes] ()
identifier[self] . identifier[namespace] [ identifier[node] [ literal[string] ]]= identifier[attributes]
keyword[for] identifier[node] keyword[in] identifier[graph] . identifier[es] :
identifier[attributes] = identifier[node] . identifier[attributes] ()
identifier[self] . identifier[namespace] [ identifier[node] [ literal[string] ]]= identifier[attributes] | def add_graph_to_namespace(self, graph):
"""
Adds the variables name to the namespace of the local LISP code
:param graph: the graph to add to the namespace
:return: None
"""
for node in graph.vs:
attributes = node.attributes()
self.namespace[node['name']] = attributes # depends on [control=['for'], data=['node']]
for node in graph.es:
attributes = node.attributes()
self.namespace[node['name']] = attributes # depends on [control=['for'], data=['node']] |
def rmon_event_entry_event_owner(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon")
event_entry = ET.SubElement(rmon, "event-entry")
event_index_key = ET.SubElement(event_entry, "event-index")
event_index_key.text = kwargs.pop('event_index')
event_owner = ET.SubElement(event_entry, "event-owner")
event_owner.text = kwargs.pop('event_owner')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[rmon_event_entry_event_owner, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[rmon] assign[=] call[name[ET].SubElement, parameter[name[config], constant[rmon]]]
variable[event_entry] assign[=] call[name[ET].SubElement, parameter[name[rmon], constant[event-entry]]]
variable[event_index_key] assign[=] call[name[ET].SubElement, parameter[name[event_entry], constant[event-index]]]
name[event_index_key].text assign[=] call[name[kwargs].pop, parameter[constant[event_index]]]
variable[event_owner] assign[=] call[name[ET].SubElement, parameter[name[event_entry], constant[event-owner]]]
name[event_owner].text assign[=] call[name[kwargs].pop, parameter[constant[event_owner]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[rmon_event_entry_event_owner] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[rmon] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[event_entry] = identifier[ET] . identifier[SubElement] ( identifier[rmon] , literal[string] )
identifier[event_index_key] = identifier[ET] . identifier[SubElement] ( identifier[event_entry] , literal[string] )
identifier[event_index_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[event_owner] = identifier[ET] . identifier[SubElement] ( identifier[event_entry] , literal[string] )
identifier[event_owner] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def rmon_event_entry_event_owner(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
rmon = ET.SubElement(config, 'rmon', xmlns='urn:brocade.com:mgmt:brocade-rmon')
event_entry = ET.SubElement(rmon, 'event-entry')
event_index_key = ET.SubElement(event_entry, 'event-index')
event_index_key.text = kwargs.pop('event_index')
event_owner = ET.SubElement(event_entry, 'event-owner')
event_owner.text = kwargs.pop('event_owner')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _lookup_abs(self, p, klass, create=1):
"""
Fast (?) lookup of a *normalized* absolute path.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the FS.Entry(), FS.Dir() or FS.File() methods.
The caller is responsible for making sure we're passed a
normalized absolute path; we merely let Python's dictionary look
up and return the One True Node.FS object for the path.
If a Node for the specified "p" doesn't already exist, and
"create" is specified, the Node may be created after recursive
invocation to find or create the parent directory or directories.
"""
k = _my_normcase(p)
try:
result = self._lookupDict[k]
except KeyError:
if not create:
msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
raise SCons.Errors.UserError(msg)
# There is no Node for this path name, and we're allowed
# to create it.
dir_name, file_name = p.rsplit('/',1)
dir_node = self._lookup_abs(dir_name, Dir)
result = klass(file_name, dir_node, self.fs)
# Double-check on disk (as configured) that the Node we
# created matches whatever is out there in the real world.
result.diskcheck_match()
self._lookupDict[k] = result
dir_node.entries[_my_normcase(file_name)] = result
dir_node.implicit = None
else:
# There is already a Node for this path name. Allow it to
# complain if we were looking for an inappropriate type.
result.must_be_same(klass)
return result | def function[_lookup_abs, parameter[self, p, klass, create]]:
constant[
Fast (?) lookup of a *normalized* absolute path.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the FS.Entry(), FS.Dir() or FS.File() methods.
The caller is responsible for making sure we're passed a
normalized absolute path; we merely let Python's dictionary look
up and return the One True Node.FS object for the path.
If a Node for the specified "p" doesn't already exist, and
"create" is specified, the Node may be created after recursive
invocation to find or create the parent directory or directories.
]
variable[k] assign[=] call[name[_my_normcase], parameter[name[p]]]
<ast.Try object at 0x7da204345360>
return[name[result]] | keyword[def] identifier[_lookup_abs] ( identifier[self] , identifier[p] , identifier[klass] , identifier[create] = literal[int] ):
literal[string]
identifier[k] = identifier[_my_normcase] ( identifier[p] )
keyword[try] :
identifier[result] = identifier[self] . identifier[_lookupDict] [ identifier[k] ]
keyword[except] identifier[KeyError] :
keyword[if] keyword[not] identifier[create] :
identifier[msg] = literal[string] %( identifier[p] , identifier[str] ( identifier[self] ))
keyword[raise] identifier[SCons] . identifier[Errors] . identifier[UserError] ( identifier[msg] )
identifier[dir_name] , identifier[file_name] = identifier[p] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[dir_node] = identifier[self] . identifier[_lookup_abs] ( identifier[dir_name] , identifier[Dir] )
identifier[result] = identifier[klass] ( identifier[file_name] , identifier[dir_node] , identifier[self] . identifier[fs] )
identifier[result] . identifier[diskcheck_match] ()
identifier[self] . identifier[_lookupDict] [ identifier[k] ]= identifier[result]
identifier[dir_node] . identifier[entries] [ identifier[_my_normcase] ( identifier[file_name] )]= identifier[result]
identifier[dir_node] . identifier[implicit] = keyword[None]
keyword[else] :
identifier[result] . identifier[must_be_same] ( identifier[klass] )
keyword[return] identifier[result] | def _lookup_abs(self, p, klass, create=1):
"""
Fast (?) lookup of a *normalized* absolute path.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the FS.Entry(), FS.Dir() or FS.File() methods.
The caller is responsible for making sure we're passed a
normalized absolute path; we merely let Python's dictionary look
up and return the One True Node.FS object for the path.
If a Node for the specified "p" doesn't already exist, and
"create" is specified, the Node may be created after recursive
invocation to find or create the parent directory or directories.
"""
k = _my_normcase(p)
try:
result = self._lookupDict[k] # depends on [control=['try'], data=[]]
except KeyError:
if not create:
msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
raise SCons.Errors.UserError(msg) # depends on [control=['if'], data=[]]
# There is no Node for this path name, and we're allowed
# to create it.
(dir_name, file_name) = p.rsplit('/', 1)
dir_node = self._lookup_abs(dir_name, Dir)
result = klass(file_name, dir_node, self.fs)
# Double-check on disk (as configured) that the Node we
# created matches whatever is out there in the real world.
result.diskcheck_match()
self._lookupDict[k] = result
dir_node.entries[_my_normcase(file_name)] = result
dir_node.implicit = None # depends on [control=['except'], data=[]]
else:
# There is already a Node for this path name. Allow it to
# complain if we were looking for an inappropriate type.
result.must_be_same(klass)
return result |
def add_prefix(dict_like, prefix):
"""
takes a dict (or dict-like object, e.g. etree._Attrib) and adds the
given prefix to each key. Always returns a dict (via a typecast).
Parameters
----------
dict_like : dict (or similar)
a dictionary or a container that implements .items()
prefix : str
the prefix string to be prepended to each key in the input dict
Returns
-------
prefixed_dict : dict
A dict, in which each key begins with the given prefix.
"""
if not isinstance(dict_like, dict):
try:
dict_like = dict(dict_like)
except Exception as e:
raise ValueError("{0}\nCan't convert container to dict: "
"{1}".format(e, dict_like))
return {prefix + k: v for (k, v) in dict_like.items()} | def function[add_prefix, parameter[dict_like, prefix]]:
constant[
takes a dict (or dict-like object, e.g. etree._Attrib) and adds the
given prefix to each key. Always returns a dict (via a typecast).
Parameters
----------
dict_like : dict (or similar)
a dictionary or a container that implements .items()
prefix : str
the prefix string to be prepended to each key in the input dict
Returns
-------
prefixed_dict : dict
A dict, in which each key begins with the given prefix.
]
if <ast.UnaryOp object at 0x7da2046225f0> begin[:]
<ast.Try object at 0x7da204623f70>
return[<ast.DictComp object at 0x7da18c4cf9a0>] | keyword[def] identifier[add_prefix] ( identifier[dict_like] , identifier[prefix] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[dict_like] , identifier[dict] ):
keyword[try] :
identifier[dict_like] = identifier[dict] ( identifier[dict_like] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[e] , identifier[dict_like] ))
keyword[return] { identifier[prefix] + identifier[k] : identifier[v] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[dict_like] . identifier[items] ()} | def add_prefix(dict_like, prefix):
"""
takes a dict (or dict-like object, e.g. etree._Attrib) and adds the
given prefix to each key. Always returns a dict (via a typecast).
Parameters
----------
dict_like : dict (or similar)
a dictionary or a container that implements .items()
prefix : str
the prefix string to be prepended to each key in the input dict
Returns
-------
prefixed_dict : dict
A dict, in which each key begins with the given prefix.
"""
if not isinstance(dict_like, dict):
try:
dict_like = dict(dict_like) # depends on [control=['try'], data=[]]
except Exception as e:
raise ValueError("{0}\nCan't convert container to dict: {1}".format(e, dict_like)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
return {prefix + k: v for (k, v) in dict_like.items()} |
async def _get(self, term: str = None, random: bool = False) -> dict:
"""Helper method to reduce some boilerplate with :module:`aiohttp`.
Args:
term: The term to search for. Optional if doing a random search.
random: Whether the search should return a random word.
Returns:
The JSON response from the API.
Raises:
UrbanConnectionError: If the response status isn't ``200``.
WordNotFoundError: If the response doesn't contain data (i.e. no word found).
"""
params = None
if random:
url = self.RANDOM_URL
else:
params = {'term': term}
url = self.API_URL
async with self.session.get(url, params=params) as response:
if response.status == 200:
response = await response.json()
else:
raise UrbanConnectionError(response.status)
if not response['list']:
raise WordNotFoundError(term)
return response | <ast.AsyncFunctionDef object at 0x7da20e9559f0> | keyword[async] keyword[def] identifier[_get] ( identifier[self] , identifier[term] : identifier[str] = keyword[None] , identifier[random] : identifier[bool] = keyword[False] )-> identifier[dict] :
literal[string]
identifier[params] = keyword[None]
keyword[if] identifier[random] :
identifier[url] = identifier[self] . identifier[RANDOM_URL]
keyword[else] :
identifier[params] ={ literal[string] : identifier[term] }
identifier[url] = identifier[self] . identifier[API_URL]
keyword[async] keyword[with] identifier[self] . identifier[session] . identifier[get] ( identifier[url] , identifier[params] = identifier[params] ) keyword[as] identifier[response] :
keyword[if] identifier[response] . identifier[status] == literal[int] :
identifier[response] = keyword[await] identifier[response] . identifier[json] ()
keyword[else] :
keyword[raise] identifier[UrbanConnectionError] ( identifier[response] . identifier[status] )
keyword[if] keyword[not] identifier[response] [ literal[string] ]:
keyword[raise] identifier[WordNotFoundError] ( identifier[term] )
keyword[return] identifier[response] | async def _get(self, term: str=None, random: bool=False) -> dict:
"""Helper method to reduce some boilerplate with :module:`aiohttp`.
Args:
term: The term to search for. Optional if doing a random search.
random: Whether the search should return a random word.
Returns:
The JSON response from the API.
Raises:
UrbanConnectionError: If the response status isn't ``200``.
WordNotFoundError: If the response doesn't contain data (i.e. no word found).
"""
params = None
if random:
url = self.RANDOM_URL # depends on [control=['if'], data=[]]
else:
params = {'term': term}
url = self.API_URL
async with self.session.get(url, params=params) as response:
if response.status == 200:
response = await response.json() # depends on [control=['if'], data=[]]
else:
raise UrbanConnectionError(response.status)
if not response['list']:
raise WordNotFoundError(term) # depends on [control=['if'], data=[]]
return response |
def set_image(self, image):
"""
Sets the IOS image for this router.
There is no default.
:param image: path to IOS image file
"""
image = self.manager.get_abs_image_path(image)
yield from self._hypervisor.send('vm set_ios "{name}" "{image}"'.format(name=self._name, image=image))
log.info('Router "{name}" [{id}]: has a new IOS image set: "{image}"'.format(name=self._name,
id=self._id,
image=image))
self._image = image | def function[set_image, parameter[self, image]]:
constant[
Sets the IOS image for this router.
There is no default.
:param image: path to IOS image file
]
variable[image] assign[=] call[name[self].manager.get_abs_image_path, parameter[name[image]]]
<ast.YieldFrom object at 0x7da18eb564d0>
call[name[log].info, parameter[call[constant[Router "{name}" [{id}]: has a new IOS image set: "{image}"].format, parameter[]]]]
name[self]._image assign[=] name[image] | keyword[def] identifier[set_image] ( identifier[self] , identifier[image] ):
literal[string]
identifier[image] = identifier[self] . identifier[manager] . identifier[get_abs_image_path] ( identifier[image] )
keyword[yield] keyword[from] identifier[self] . identifier[_hypervisor] . identifier[send] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] , identifier[image] = identifier[image] ))
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] ,
identifier[id] = identifier[self] . identifier[_id] ,
identifier[image] = identifier[image] ))
identifier[self] . identifier[_image] = identifier[image] | def set_image(self, image):
"""
Sets the IOS image for this router.
There is no default.
:param image: path to IOS image file
"""
image = self.manager.get_abs_image_path(image)
yield from self._hypervisor.send('vm set_ios "{name}" "{image}"'.format(name=self._name, image=image))
log.info('Router "{name}" [{id}]: has a new IOS image set: "{image}"'.format(name=self._name, id=self._id, image=image))
self._image = image |
def _rest_make_phenotypes():
#phenotype sources
neuroner = Path(devconfig.git_local_base,
'neuroNER/resources/bluima/neuroner/hbp_morphology_ontology.obo').as_posix()
neuroner1 = Path(devconfig.git_local_base,
'neuroNER/resources/bluima/neuroner/hbp_electrophysiology_ontology.obo').as_posix()
neuroner2 = Path(devconfig.git_local_base,
'neuroNER/resources/bluima/neuroner/hbp_electrophysiology-triggers_ontology.obo').as_posix()
nif_qual = Path(devconfig.ontology_local_repo,
'ttl/NIF-Quality.ttl').as_posix()
mo = OboFile(os.path.expanduser(neuroner))
mo1 = OboFile(os.path.expanduser(neuroner1))
mo2 = OboFile(os.path.expanduser(neuroner2))
mo_ttl = mo.__ttl__() + mo1.__ttl__() + mo2.__ttl__()
mo_ttl = """\
@prefix : <http://FIXME.org/> .
@prefix nsu: <http://www.FIXME.org/nsupper#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
""" + mo_ttl
#sio = io.StringIO()
#sio.write(mo_ttl)
ng = rdflib.Graph()
ng.parse(data=mo_ttl, format='turtle')
ng.parse(os.path.expanduser(nif_qual), format='turtle')
#ng.namespace_manager.bind('default1', None, override=False, replace=True)
ng.remove((None, rdflib.OWL.imports, None))
bad_match = {
'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#nlx_qual_20090505',
'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1693353776',
'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1288413465',
'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao4459136323',
'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#nlx_qual_20090507',
}
exact = []
similar = []
quals = []
s2 = {}
for subject, label in sorted(ng.subject_objects(rdflib.RDFS.label)):
syns = set([a for a in ng.objects(subject, rdflib.URIRef('http://www.FIXME.org/nsupper#synonym'))])
syns.update(set([a for a in ng.objects(subject, rdflib.URIRef('http://ontology.neuinfo.org/NIF/Backend/OBO_annotation_properties.owl#synonym'))]))
#if syns:
#print(syns)
#print(subject)
#print(label.lower())
if 'quality' in label.lower():
quals.append((subject, label))
subpre = ng.namespace_manager.compute_qname(subject)[1]
llower = rdflib.Literal(label.lower(), lang='en')
for s in ng.subjects(rdflib.RDFS.label, llower):
if s != subject:
exact.append((subject, s, label, llower))
for s, p, o in sorted(ng.triples((None, rdflib.RDFS.label, None))):
spre = ng.namespace_manager.compute_qname(s)[1]
if subject != s and label.lower() in o.lower().split(' ') and spre != subpre:
if s.toPython() in bad_match or subject.toPython() in bad_match:
continue
#print()
#print(spre, subpre)
similar.append((subject, s, label, o))
if subpre.toPython() == 'http://FIXME.org/':
print('YAY')
print(label, ',', o)
print(subject, s)
subject, s = s, subject
label, o = o, label
if subject in s2:
#print('YES IT EXISTS')
#print(syns, label, [subject, s])
s2[subject]['syns'].update(syns)
s2[subject]['syns'].add(label)
s2[subject]['xrefs'] += [subject, s]
else:
s2[subject] = {'label': label.toPython(), 'o': o.toPython(), 'xrefs':[subject, s], 'syns':syns} # FIXME overwrites
pprint(quals)
""" print stuff
print('matches')
pprint(exact)
pprint(similar)
#print('EXACT', exact)
print()
for k, v in s2.items():
print(k)
for k, v2 in sorted(v.items()):
print(' ', k, ':', v2)
#"""
desired_nif_terms = set() #{
#'NIFQUAL:sao1959705051', # dendrite
#'NIFQUAL:sao2088691397', # axon
#'NIFQUAL:sao1057800815', # morphological
#'NIFQUAL:sao-1126011106', # soma
#'NIFQUAL:',
#'NIFQUAL:',
#}
starts = [
#"NIFQUAL:sao2088691397",
#"NIFQUAL:sao1278200674",
#"NIFQUAL:sao2088691397",
#"NIFQUAL:sao-1126011106", # FIXME WTF IS THIS NONSENSE (scigraph bug?)
quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1959705051").replace('/','%2F'),
quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao2088691397").replace('/','%2F'),
quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1278200674").replace('/','%2F'),
quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao2088691397").replace('/','%2F'),
quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao-1126011106").replace('/','%2F'),
]
for id_ in starts:
want = sgg.getNeighbors(id_, relationshipType='subClassOf', direction='INCOMING', depth=5)
#print(id_, want)
desired_nif_terms.update([n['id'] for n in want['nodes']])
print(desired_nif_terms)
ilx_start = 50114
print(ilx_base.format(ilx_start))
new_terms = {}
dg = makeGraph('uwotm8', prefixes=PREFIXES)
xr = makeGraph('xrefs', prefixes=PREFIXES)
for s, o in sorted(ng.subject_objects(rdflib.RDFS.label))[::-1]:
spre = ng.namespace_manager.compute_qname(s)[1]
#if spre.toPython() == g.namespaces['NIFQUAL']:
#print('skipping', s)
#continue # TODO
if s in new_terms:
print(s, 'already in as xref probably')
continue
#elif spre.toPython() != 'http://uri.interlex.org/base/ilx_' or spre.toPython() != 'http://FIXME.org/' and s.toPython() not in desired_nif_terms:
#elif spre.toPython() != 'http://FIXME.org/' and s.toPython() not in desired_nif_terms:
#print('DO NOT WANT', s, spre)
#continue
syns = set([s for s in ng.objects(s, dg.namespaces['nsu']['synonym'])])
#data['syns'] += syns
data = {}
id_ = ilx_base.format(ilx_start)
ilx_start += 1
if s in s2:
d = s2[s]
syns.update(d['syns'])
new_terms[d['xrefs'][0]] = {'replaced_by':id_}
xr.add_trip(d['xrefs'][0], 'oboInOwl:replacedBy', id_)
#dg.add_trip(d['xrefs'][0], 'oboInOwl:replacedBy', id_)
new_terms[d['xrefs'][1]] = {'replaced_by':id_}
xr.add_trip(d['xrefs'][1], 'oboInOwl:replacedBy', id_)
#dg.add_trip(d['xrefs'][1], 'oboInOwl:replacedBy', id_)
data['labels'] = [d['label'], d['o']]
#dg.add_trip(id_, rdflib.RDFS.label, d['label'])
dg.add_trip(id_, rdflib.RDFS.label, d['o'])
data['xrefs'] = d['xrefs']
for x in d['xrefs']: # FIXME... expecting order of evaluation errors here...
dg.add_trip(id_, 'oboInOwl:hasDbXref', x) # xr
xr.add_trip(id_, 'oboInOwl:hasDbXref', x) # x
elif spre.toPython() != 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#' or ng.namespace_manager.qname(s).replace('default1','NIFQUAL') in desired_nif_terms: # skip non-xref quals
#print(ng.namespace_manager.qname(s).replace('default1','NIFQUAL'))
new_terms[s] = {'replaced_by':id_}
xr.add_trip(s, 'oboInOwl:replacedBy', id_)
data['labels'] = [o.toPython()]
dg.add_trip(id_, rdflib.RDFS.label, o.toPython())
data['xrefs'] = [s]
dg.add_trip(id_, 'oboInOwl:hasDbXref', s) # xr
xr.add_trip(id_, 'oboInOwl:hasDbXref', s) # xr
else:
ilx_start -= 1
continue
new_terms[id_] = data
dg.add_trip(id_, rdflib.RDF.type, rdflib.OWL.Class)
xr.add_trip(id_, rdflib.RDF.type, rdflib.OWL.Class)
for syn in syns:
if syn.toPython() not in data['labels']:
if len(syn) > 3:
dg.add_trip(id_, 'NIFRID:synonym', syn)
elif syn:
dg.add_trip(id_, 'NIFRID:abbrev', syn)
if 'EPHYS' in s or any(['EPHYS' in x for x in data['xrefs']]):
dg.add_trip(id_, rdflib.RDFS.subClassOf, ephys_phenotype)
elif 'MORPHOLOGY' in s or any(['MORPHOLOGY' in x for x in data['xrefs']]):
dg.add_trip(id_, rdflib.RDFS.subClassOf, morpho_phenotype)
#dg.write(convert=False)
xr.write(convert=False)
#skip this for now, we can use DG to do lookups later
#for t in dg.g.triples((None, None, None)):
#g.add_trip(*t) # only way to clean prefixes :/
add_phenotypes(g)
g.write(convert=False)
g2 = makeGraph('pheno-comp', PREFIXES)
for t in ng.triples((None, None, None)):
g2.add_trip(*t) # only way to clean prefixes :/
g2.write(convert=False)
syn_mappings = {}
for sub, syn in [_ for _ in g.g.subject_objects(g.expand('NIFRID:synonym'))] + [_ for _ in g.g.subject_objects(rdflib.RDFS.label)]:
syn = syn.toPython()
if syn in syn_mappings:
log.error(f'duplicate synonym! {syn} {sub}')
syn_mappings[syn] = sub
#embed()
return syn_mappings, pedges, ilx_start | def function[_rest_make_phenotypes, parameter[]]:
variable[neuroner] assign[=] call[call[name[Path], parameter[name[devconfig].git_local_base, constant[neuroNER/resources/bluima/neuroner/hbp_morphology_ontology.obo]]].as_posix, parameter[]]
variable[neuroner1] assign[=] call[call[name[Path], parameter[name[devconfig].git_local_base, constant[neuroNER/resources/bluima/neuroner/hbp_electrophysiology_ontology.obo]]].as_posix, parameter[]]
variable[neuroner2] assign[=] call[call[name[Path], parameter[name[devconfig].git_local_base, constant[neuroNER/resources/bluima/neuroner/hbp_electrophysiology-triggers_ontology.obo]]].as_posix, parameter[]]
variable[nif_qual] assign[=] call[call[name[Path], parameter[name[devconfig].ontology_local_repo, constant[ttl/NIF-Quality.ttl]]].as_posix, parameter[]]
variable[mo] assign[=] call[name[OboFile], parameter[call[name[os].path.expanduser, parameter[name[neuroner]]]]]
variable[mo1] assign[=] call[name[OboFile], parameter[call[name[os].path.expanduser, parameter[name[neuroner1]]]]]
variable[mo2] assign[=] call[name[OboFile], parameter[call[name[os].path.expanduser, parameter[name[neuroner2]]]]]
variable[mo_ttl] assign[=] binary_operation[binary_operation[call[name[mo].__ttl__, parameter[]] + call[name[mo1].__ttl__, parameter[]]] + call[name[mo2].__ttl__, parameter[]]]
variable[mo_ttl] assign[=] binary_operation[constant[ @prefix : <http://FIXME.org/> .
@prefix nsu: <http://www.FIXME.org/nsupper#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
] + name[mo_ttl]]
variable[ng] assign[=] call[name[rdflib].Graph, parameter[]]
call[name[ng].parse, parameter[]]
call[name[ng].parse, parameter[call[name[os].path.expanduser, parameter[name[nif_qual]]]]]
call[name[ng].remove, parameter[tuple[[<ast.Constant object at 0x7da1b1a10f70>, <ast.Attribute object at 0x7da1b1a11000>, <ast.Constant object at 0x7da1b1a13910>]]]]
variable[bad_match] assign[=] <ast.Set object at 0x7da1b1a13280>
variable[exact] assign[=] list[[]]
variable[similar] assign[=] list[[]]
variable[quals] assign[=] list[[]]
variable[s2] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1a11e40>, <ast.Name object at 0x7da1b1a12aa0>]]] in starred[call[name[sorted], parameter[call[name[ng].subject_objects, parameter[name[rdflib].RDFS.label]]]]] begin[:]
variable[syns] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b1a116f0>]]
call[name[syns].update, parameter[call[name[set], parameter[<ast.ListComp object at 0x7da1b1a11480>]]]]
if compare[constant[quality] in call[name[label].lower, parameter[]]] begin[:]
call[name[quals].append, parameter[tuple[[<ast.Name object at 0x7da1b1a13d90>, <ast.Name object at 0x7da1b1a13fd0>]]]]
variable[subpre] assign[=] call[call[name[ng].namespace_manager.compute_qname, parameter[name[subject]]]][constant[1]]
variable[llower] assign[=] call[name[rdflib].Literal, parameter[call[name[label].lower, parameter[]]]]
for taget[name[s]] in starred[call[name[ng].subjects, parameter[name[rdflib].RDFS.label, name[llower]]]] begin[:]
if compare[name[s] not_equal[!=] name[subject]] begin[:]
call[name[exact].append, parameter[tuple[[<ast.Name object at 0x7da1b1ad9510>, <ast.Name object at 0x7da1b1ad94e0>, <ast.Name object at 0x7da1b1ad94b0>, <ast.Name object at 0x7da1b1ad9480>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1ad8f40>, <ast.Name object at 0x7da1b1ad8f10>, <ast.Name object at 0x7da1b1ad8e20>]]] in starred[call[name[sorted], parameter[call[name[ng].triples, parameter[tuple[[<ast.Constant object at 0x7da1b1ad8640>, <ast.Attribute object at 0x7da1b1ad8d00>, <ast.Constant object at 0x7da1b1ad9db0>]]]]]]] begin[:]
variable[spre] assign[=] call[call[name[ng].namespace_manager.compute_qname, parameter[name[s]]]][constant[1]]
if <ast.BoolOp object at 0x7da1b1ad9e70> begin[:]
if <ast.BoolOp object at 0x7da1b1ad8280> begin[:]
continue
call[name[similar].append, parameter[tuple[[<ast.Name object at 0x7da1b1ad8070>, <ast.Name object at 0x7da1b1ad98d0>, <ast.Name object at 0x7da1b1ad9900>, <ast.Name object at 0x7da1b1ad9a20>]]]]
if compare[call[name[subpre].toPython, parameter[]] equal[==] constant[http://FIXME.org/]] begin[:]
call[name[print], parameter[constant[YAY]]]
call[name[print], parameter[name[label], constant[,], name[o]]]
call[name[print], parameter[name[subject], name[s]]]
<ast.Tuple object at 0x7da1b1adb4c0> assign[=] tuple[[<ast.Name object at 0x7da1b1adb280>, <ast.Name object at 0x7da1b1adb2b0>]]
<ast.Tuple object at 0x7da1b1adb370> assign[=] tuple[[<ast.Name object at 0x7da1b1adb250>, <ast.Name object at 0x7da1b1adb460>]]
if compare[name[subject] in name[s2]] begin[:]
call[call[call[name[s2]][name[subject]]][constant[syns]].update, parameter[name[syns]]]
call[call[call[name[s2]][name[subject]]][constant[syns]].add, parameter[name[label]]]
<ast.AugAssign object at 0x7da1b1adb610>
call[name[pprint], parameter[name[quals]]]
constant[ print stuff
print('matches')
pprint(exact)
pprint(similar)
#print('EXACT', exact)
print()
for k, v in s2.items():
print(k)
for k, v2 in sorted(v.items()):
print(' ', k, ':', v2)
#]
variable[desired_nif_terms] assign[=] call[name[set], parameter[]]
variable[starts] assign[=] list[[<ast.Call object at 0x7da1b1ad8dc0>, <ast.Call object at 0x7da1b1ad8fa0>, <ast.Call object at 0x7da1b1ad9120>, <ast.Call object at 0x7da1b1ad8940>, <ast.Call object at 0x7da1b1ad87f0>]]
for taget[name[id_]] in starred[name[starts]] begin[:]
variable[want] assign[=] call[name[sgg].getNeighbors, parameter[name[id_]]]
call[name[desired_nif_terms].update, parameter[<ast.ListComp object at 0x7da1b1aa63b0>]]
call[name[print], parameter[name[desired_nif_terms]]]
variable[ilx_start] assign[=] constant[50114]
call[name[print], parameter[call[name[ilx_base].format, parameter[name[ilx_start]]]]]
variable[new_terms] assign[=] dictionary[[], []]
variable[dg] assign[=] call[name[makeGraph], parameter[constant[uwotm8]]]
variable[xr] assign[=] call[name[makeGraph], parameter[constant[xrefs]]]
for taget[tuple[[<ast.Name object at 0x7da1b1aa4610>, <ast.Name object at 0x7da1b1aa55d0>]]] in starred[call[call[name[sorted], parameter[call[name[ng].subject_objects, parameter[name[rdflib].RDFS.label]]]]][<ast.Slice object at 0x7da1b1aa5ff0>]] begin[:]
variable[spre] assign[=] call[call[name[ng].namespace_manager.compute_qname, parameter[name[s]]]][constant[1]]
if compare[name[s] in name[new_terms]] begin[:]
call[name[print], parameter[name[s], constant[already in as xref probably]]]
continue
variable[syns] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b1aa6050>]]
variable[data] assign[=] dictionary[[], []]
variable[id_] assign[=] call[name[ilx_base].format, parameter[name[ilx_start]]]
<ast.AugAssign object at 0x7da1b1aa6e60>
if compare[name[s] in name[s2]] begin[:]
variable[d] assign[=] call[name[s2]][name[s]]
call[name[syns].update, parameter[call[name[d]][constant[syns]]]]
call[name[new_terms]][call[call[name[d]][constant[xrefs]]][constant[0]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1aa48e0>], [<ast.Name object at 0x7da1b1aa4730>]]
call[name[xr].add_trip, parameter[call[call[name[d]][constant[xrefs]]][constant[0]], constant[oboInOwl:replacedBy], name[id_]]]
call[name[new_terms]][call[call[name[d]][constant[xrefs]]][constant[1]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1aa6c50>], [<ast.Name object at 0x7da1b1aa5090>]]
call[name[xr].add_trip, parameter[call[call[name[d]][constant[xrefs]]][constant[1]], constant[oboInOwl:replacedBy], name[id_]]]
call[name[data]][constant[labels]] assign[=] list[[<ast.Subscript object at 0x7da1b1aa7280>, <ast.Subscript object at 0x7da1b1aa6710>]]
call[name[dg].add_trip, parameter[name[id_], name[rdflib].RDFS.label, call[name[d]][constant[o]]]]
call[name[data]][constant[xrefs]] assign[=] call[name[d]][constant[xrefs]]
for taget[name[x]] in starred[call[name[d]][constant[xrefs]]] begin[:]
call[name[dg].add_trip, parameter[name[id_], constant[oboInOwl:hasDbXref], name[x]]]
call[name[xr].add_trip, parameter[name[id_], constant[oboInOwl:hasDbXref], name[x]]]
call[name[new_terms]][name[id_]] assign[=] name[data]
call[name[dg].add_trip, parameter[name[id_], name[rdflib].RDF.type, name[rdflib].OWL.Class]]
call[name[xr].add_trip, parameter[name[id_], name[rdflib].RDF.type, name[rdflib].OWL.Class]]
for taget[name[syn]] in starred[name[syns]] begin[:]
if compare[call[name[syn].toPython, parameter[]] <ast.NotIn object at 0x7da2590d7190> call[name[data]][constant[labels]]] begin[:]
if compare[call[name[len], parameter[name[syn]]] greater[>] constant[3]] begin[:]
call[name[dg].add_trip, parameter[name[id_], constant[NIFRID:synonym], name[syn]]]
if <ast.BoolOp object at 0x7da1b1a22740> begin[:]
call[name[dg].add_trip, parameter[name[id_], name[rdflib].RDFS.subClassOf, name[ephys_phenotype]]]
call[name[xr].write, parameter[]]
call[name[add_phenotypes], parameter[name[g]]]
call[name[g].write, parameter[]]
variable[g2] assign[=] call[name[makeGraph], parameter[constant[pheno-comp], name[PREFIXES]]]
for taget[name[t]] in starred[call[name[ng].triples, parameter[tuple[[<ast.Constant object at 0x7da1b1b012a0>, <ast.Constant object at 0x7da1b1b02620>, <ast.Constant object at 0x7da1b1b00d90>]]]]] begin[:]
call[name[g2].add_trip, parameter[<ast.Starred object at 0x7da1b1b00250>]]
call[name[g2].write, parameter[]]
variable[syn_mappings] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1b01e40>, <ast.Name object at 0x7da1b1b01e70>]]] in starred[binary_operation[<ast.ListComp object at 0x7da1b1b03b20> + <ast.ListComp object at 0x7da1b1b00bb0>]] begin[:]
variable[syn] assign[=] call[name[syn].toPython, parameter[]]
if compare[name[syn] in name[syn_mappings]] begin[:]
call[name[log].error, parameter[<ast.JoinedStr object at 0x7da1b1b03be0>]]
call[name[syn_mappings]][name[syn]] assign[=] name[sub]
return[tuple[[<ast.Name object at 0x7da1b1a7e980>, <ast.Name object at 0x7da1b1a7ec50>, <ast.Name object at 0x7da1b1a7e770>]]] | keyword[def] identifier[_rest_make_phenotypes] ():
identifier[neuroner] = identifier[Path] ( identifier[devconfig] . identifier[git_local_base] ,
literal[string] ). identifier[as_posix] ()
identifier[neuroner1] = identifier[Path] ( identifier[devconfig] . identifier[git_local_base] ,
literal[string] ). identifier[as_posix] ()
identifier[neuroner2] = identifier[Path] ( identifier[devconfig] . identifier[git_local_base] ,
literal[string] ). identifier[as_posix] ()
identifier[nif_qual] = identifier[Path] ( identifier[devconfig] . identifier[ontology_local_repo] ,
literal[string] ). identifier[as_posix] ()
identifier[mo] = identifier[OboFile] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[neuroner] ))
identifier[mo1] = identifier[OboFile] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[neuroner1] ))
identifier[mo2] = identifier[OboFile] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[neuroner2] ))
identifier[mo_ttl] = identifier[mo] . identifier[__ttl__] ()+ identifier[mo1] . identifier[__ttl__] ()+ identifier[mo2] . identifier[__ttl__] ()
identifier[mo_ttl] = literal[string] + identifier[mo_ttl]
identifier[ng] = identifier[rdflib] . identifier[Graph] ()
identifier[ng] . identifier[parse] ( identifier[data] = identifier[mo_ttl] , identifier[format] = literal[string] )
identifier[ng] . identifier[parse] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[nif_qual] ), identifier[format] = literal[string] )
identifier[ng] . identifier[remove] (( keyword[None] , identifier[rdflib] . identifier[OWL] . identifier[imports] , keyword[None] ))
identifier[bad_match] ={
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
}
identifier[exact] =[]
identifier[similar] =[]
identifier[quals] =[]
identifier[s2] ={}
keyword[for] identifier[subject] , identifier[label] keyword[in] identifier[sorted] ( identifier[ng] . identifier[subject_objects] ( identifier[rdflib] . identifier[RDFS] . identifier[label] )):
identifier[syns] = identifier[set] ([ identifier[a] keyword[for] identifier[a] keyword[in] identifier[ng] . identifier[objects] ( identifier[subject] , identifier[rdflib] . identifier[URIRef] ( literal[string] ))])
identifier[syns] . identifier[update] ( identifier[set] ([ identifier[a] keyword[for] identifier[a] keyword[in] identifier[ng] . identifier[objects] ( identifier[subject] , identifier[rdflib] . identifier[URIRef] ( literal[string] ))]))
keyword[if] literal[string] keyword[in] identifier[label] . identifier[lower] ():
identifier[quals] . identifier[append] (( identifier[subject] , identifier[label] ))
identifier[subpre] = identifier[ng] . identifier[namespace_manager] . identifier[compute_qname] ( identifier[subject] )[ literal[int] ]
identifier[llower] = identifier[rdflib] . identifier[Literal] ( identifier[label] . identifier[lower] (), identifier[lang] = literal[string] )
keyword[for] identifier[s] keyword[in] identifier[ng] . identifier[subjects] ( identifier[rdflib] . identifier[RDFS] . identifier[label] , identifier[llower] ):
keyword[if] identifier[s] != identifier[subject] :
identifier[exact] . identifier[append] (( identifier[subject] , identifier[s] , identifier[label] , identifier[llower] ))
keyword[for] identifier[s] , identifier[p] , identifier[o] keyword[in] identifier[sorted] ( identifier[ng] . identifier[triples] (( keyword[None] , identifier[rdflib] . identifier[RDFS] . identifier[label] , keyword[None] ))):
identifier[spre] = identifier[ng] . identifier[namespace_manager] . identifier[compute_qname] ( identifier[s] )[ literal[int] ]
keyword[if] identifier[subject] != identifier[s] keyword[and] identifier[label] . identifier[lower] () keyword[in] identifier[o] . identifier[lower] (). identifier[split] ( literal[string] ) keyword[and] identifier[spre] != identifier[subpre] :
keyword[if] identifier[s] . identifier[toPython] () keyword[in] identifier[bad_match] keyword[or] identifier[subject] . identifier[toPython] () keyword[in] identifier[bad_match] :
keyword[continue]
identifier[similar] . identifier[append] (( identifier[subject] , identifier[s] , identifier[label] , identifier[o] ))
keyword[if] identifier[subpre] . identifier[toPython] ()== literal[string] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[label] , literal[string] , identifier[o] )
identifier[print] ( identifier[subject] , identifier[s] )
identifier[subject] , identifier[s] = identifier[s] , identifier[subject]
identifier[label] , identifier[o] = identifier[o] , identifier[label]
keyword[if] identifier[subject] keyword[in] identifier[s2] :
identifier[s2] [ identifier[subject] ][ literal[string] ]. identifier[update] ( identifier[syns] )
identifier[s2] [ identifier[subject] ][ literal[string] ]. identifier[add] ( identifier[label] )
identifier[s2] [ identifier[subject] ][ literal[string] ]+=[ identifier[subject] , identifier[s] ]
keyword[else] :
identifier[s2] [ identifier[subject] ]={ literal[string] : identifier[label] . identifier[toPython] (), literal[string] : identifier[o] . identifier[toPython] (), literal[string] :[ identifier[subject] , identifier[s] ], literal[string] : identifier[syns] }
identifier[pprint] ( identifier[quals] )
literal[string]
identifier[desired_nif_terms] = identifier[set] ()
identifier[starts] =[
identifier[quote] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] ),
identifier[quote] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] ),
identifier[quote] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] ),
identifier[quote] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] ),
identifier[quote] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] ),
]
keyword[for] identifier[id_] keyword[in] identifier[starts] :
identifier[want] = identifier[sgg] . identifier[getNeighbors] ( identifier[id_] , identifier[relationshipType] = literal[string] , identifier[direction] = literal[string] , identifier[depth] = literal[int] )
identifier[desired_nif_terms] . identifier[update] ([ identifier[n] [ literal[string] ] keyword[for] identifier[n] keyword[in] identifier[want] [ literal[string] ]])
identifier[print] ( identifier[desired_nif_terms] )
identifier[ilx_start] = literal[int]
identifier[print] ( identifier[ilx_base] . identifier[format] ( identifier[ilx_start] ))
identifier[new_terms] ={}
identifier[dg] = identifier[makeGraph] ( literal[string] , identifier[prefixes] = identifier[PREFIXES] )
identifier[xr] = identifier[makeGraph] ( literal[string] , identifier[prefixes] = identifier[PREFIXES] )
keyword[for] identifier[s] , identifier[o] keyword[in] identifier[sorted] ( identifier[ng] . identifier[subject_objects] ( identifier[rdflib] . identifier[RDFS] . identifier[label] ))[::- literal[int] ]:
identifier[spre] = identifier[ng] . identifier[namespace_manager] . identifier[compute_qname] ( identifier[s] )[ literal[int] ]
keyword[if] identifier[s] keyword[in] identifier[new_terms] :
identifier[print] ( identifier[s] , literal[string] )
keyword[continue]
identifier[syns] = identifier[set] ([ identifier[s] keyword[for] identifier[s] keyword[in] identifier[ng] . identifier[objects] ( identifier[s] , identifier[dg] . identifier[namespaces] [ literal[string] ][ literal[string] ])])
identifier[data] ={}
identifier[id_] = identifier[ilx_base] . identifier[format] ( identifier[ilx_start] )
identifier[ilx_start] += literal[int]
keyword[if] identifier[s] keyword[in] identifier[s2] :
identifier[d] = identifier[s2] [ identifier[s] ]
identifier[syns] . identifier[update] ( identifier[d] [ literal[string] ])
identifier[new_terms] [ identifier[d] [ literal[string] ][ literal[int] ]]={ literal[string] : identifier[id_] }
identifier[xr] . identifier[add_trip] ( identifier[d] [ literal[string] ][ literal[int] ], literal[string] , identifier[id_] )
identifier[new_terms] [ identifier[d] [ literal[string] ][ literal[int] ]]={ literal[string] : identifier[id_] }
identifier[xr] . identifier[add_trip] ( identifier[d] [ literal[string] ][ literal[int] ], literal[string] , identifier[id_] )
identifier[data] [ literal[string] ]=[ identifier[d] [ literal[string] ], identifier[d] [ literal[string] ]]
identifier[dg] . identifier[add_trip] ( identifier[id_] , identifier[rdflib] . identifier[RDFS] . identifier[label] , identifier[d] [ literal[string] ])
identifier[data] [ literal[string] ]= identifier[d] [ literal[string] ]
keyword[for] identifier[x] keyword[in] identifier[d] [ literal[string] ]:
identifier[dg] . identifier[add_trip] ( identifier[id_] , literal[string] , identifier[x] )
identifier[xr] . identifier[add_trip] ( identifier[id_] , literal[string] , identifier[x] )
keyword[elif] identifier[spre] . identifier[toPython] ()!= literal[string] keyword[or] identifier[ng] . identifier[namespace_manager] . identifier[qname] ( identifier[s] ). identifier[replace] ( literal[string] , literal[string] ) keyword[in] identifier[desired_nif_terms] :
identifier[new_terms] [ identifier[s] ]={ literal[string] : identifier[id_] }
identifier[xr] . identifier[add_trip] ( identifier[s] , literal[string] , identifier[id_] )
identifier[data] [ literal[string] ]=[ identifier[o] . identifier[toPython] ()]
identifier[dg] . identifier[add_trip] ( identifier[id_] , identifier[rdflib] . identifier[RDFS] . identifier[label] , identifier[o] . identifier[toPython] ())
identifier[data] [ literal[string] ]=[ identifier[s] ]
identifier[dg] . identifier[add_trip] ( identifier[id_] , literal[string] , identifier[s] )
identifier[xr] . identifier[add_trip] ( identifier[id_] , literal[string] , identifier[s] )
keyword[else] :
identifier[ilx_start] -= literal[int]
keyword[continue]
identifier[new_terms] [ identifier[id_] ]= identifier[data]
identifier[dg] . identifier[add_trip] ( identifier[id_] , identifier[rdflib] . identifier[RDF] . identifier[type] , identifier[rdflib] . identifier[OWL] . identifier[Class] )
identifier[xr] . identifier[add_trip] ( identifier[id_] , identifier[rdflib] . identifier[RDF] . identifier[type] , identifier[rdflib] . identifier[OWL] . identifier[Class] )
keyword[for] identifier[syn] keyword[in] identifier[syns] :
keyword[if] identifier[syn] . identifier[toPython] () keyword[not] keyword[in] identifier[data] [ literal[string] ]:
keyword[if] identifier[len] ( identifier[syn] )> literal[int] :
identifier[dg] . identifier[add_trip] ( identifier[id_] , literal[string] , identifier[syn] )
keyword[elif] identifier[syn] :
identifier[dg] . identifier[add_trip] ( identifier[id_] , literal[string] , identifier[syn] )
keyword[if] literal[string] keyword[in] identifier[s] keyword[or] identifier[any] ([ literal[string] keyword[in] identifier[x] keyword[for] identifier[x] keyword[in] identifier[data] [ literal[string] ]]):
identifier[dg] . identifier[add_trip] ( identifier[id_] , identifier[rdflib] . identifier[RDFS] . identifier[subClassOf] , identifier[ephys_phenotype] )
keyword[elif] literal[string] keyword[in] identifier[s] keyword[or] identifier[any] ([ literal[string] keyword[in] identifier[x] keyword[for] identifier[x] keyword[in] identifier[data] [ literal[string] ]]):
identifier[dg] . identifier[add_trip] ( identifier[id_] , identifier[rdflib] . identifier[RDFS] . identifier[subClassOf] , identifier[morpho_phenotype] )
identifier[xr] . identifier[write] ( identifier[convert] = keyword[False] )
identifier[add_phenotypes] ( identifier[g] )
identifier[g] . identifier[write] ( identifier[convert] = keyword[False] )
identifier[g2] = identifier[makeGraph] ( literal[string] , identifier[PREFIXES] )
keyword[for] identifier[t] keyword[in] identifier[ng] . identifier[triples] (( keyword[None] , keyword[None] , keyword[None] )):
identifier[g2] . identifier[add_trip] (* identifier[t] )
identifier[g2] . identifier[write] ( identifier[convert] = keyword[False] )
identifier[syn_mappings] ={}
keyword[for] identifier[sub] , identifier[syn] keyword[in] [ identifier[_] keyword[for] identifier[_] keyword[in] identifier[g] . identifier[g] . identifier[subject_objects] ( identifier[g] . identifier[expand] ( literal[string] ))]+[ identifier[_] keyword[for] identifier[_] keyword[in] identifier[g] . identifier[g] . identifier[subject_objects] ( identifier[rdflib] . identifier[RDFS] . identifier[label] )]:
identifier[syn] = identifier[syn] . identifier[toPython] ()
keyword[if] identifier[syn] keyword[in] identifier[syn_mappings] :
identifier[log] . identifier[error] ( literal[string] )
identifier[syn_mappings] [ identifier[syn] ]= identifier[sub]
keyword[return] identifier[syn_mappings] , identifier[pedges] , identifier[ilx_start] | def _rest_make_phenotypes():
#phenotype sources
neuroner = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_morphology_ontology.obo').as_posix()
neuroner1 = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_electrophysiology_ontology.obo').as_posix()
neuroner2 = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_electrophysiology-triggers_ontology.obo').as_posix()
nif_qual = Path(devconfig.ontology_local_repo, 'ttl/NIF-Quality.ttl').as_posix()
mo = OboFile(os.path.expanduser(neuroner))
mo1 = OboFile(os.path.expanduser(neuroner1))
mo2 = OboFile(os.path.expanduser(neuroner2))
mo_ttl = mo.__ttl__() + mo1.__ttl__() + mo2.__ttl__()
mo_ttl = ' @prefix : <http://FIXME.org/> .\n @prefix nsu: <http://www.FIXME.org/nsupper#> .\n @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n @prefix owl: <http://www.w3.org/2002/07/owl#> .\n @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n ' + mo_ttl
#sio = io.StringIO()
#sio.write(mo_ttl)
ng = rdflib.Graph()
ng.parse(data=mo_ttl, format='turtle')
ng.parse(os.path.expanduser(nif_qual), format='turtle')
#ng.namespace_manager.bind('default1', None, override=False, replace=True)
ng.remove((None, rdflib.OWL.imports, None))
bad_match = {'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#nlx_qual_20090505', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1693353776', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1288413465', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao4459136323', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#nlx_qual_20090507'}
exact = []
similar = []
quals = []
s2 = {}
for (subject, label) in sorted(ng.subject_objects(rdflib.RDFS.label)):
syns = set([a for a in ng.objects(subject, rdflib.URIRef('http://www.FIXME.org/nsupper#synonym'))])
syns.update(set([a for a in ng.objects(subject, rdflib.URIRef('http://ontology.neuinfo.org/NIF/Backend/OBO_annotation_properties.owl#synonym'))]))
#if syns:
#print(syns)
#print(subject)
#print(label.lower())
if 'quality' in label.lower():
quals.append((subject, label)) # depends on [control=['if'], data=[]]
subpre = ng.namespace_manager.compute_qname(subject)[1]
llower = rdflib.Literal(label.lower(), lang='en')
for s in ng.subjects(rdflib.RDFS.label, llower):
if s != subject:
exact.append((subject, s, label, llower)) # depends on [control=['if'], data=['s', 'subject']] # depends on [control=['for'], data=['s']]
for (s, p, o) in sorted(ng.triples((None, rdflib.RDFS.label, None))):
spre = ng.namespace_manager.compute_qname(s)[1]
if subject != s and label.lower() in o.lower().split(' ') and (spre != subpre):
if s.toPython() in bad_match or subject.toPython() in bad_match:
continue # depends on [control=['if'], data=[]]
#print()
#print(spre, subpre)
similar.append((subject, s, label, o))
if subpre.toPython() == 'http://FIXME.org/':
print('YAY')
print(label, ',', o)
print(subject, s)
(subject, s) = (s, subject)
(label, o) = (o, label) # depends on [control=['if'], data=[]]
if subject in s2:
#print('YES IT EXISTS')
#print(syns, label, [subject, s])
s2[subject]['syns'].update(syns)
s2[subject]['syns'].add(label)
s2[subject]['xrefs'] += [subject, s] # depends on [control=['if'], data=['subject', 's2']]
else:
s2[subject] = {'label': label.toPython(), 'o': o.toPython(), 'xrefs': [subject, s], 'syns': syns} # FIXME overwrites # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
pprint(quals)
" print stuff\n print('matches')\n pprint(exact)\n pprint(similar)\n\n #print('EXACT', exact)\n\n print()\n for k, v in s2.items():\n print(k)\n for k, v2 in sorted(v.items()):\n print(' ', k, ':', v2)\n #"
desired_nif_terms = set() #{
#'NIFQUAL:sao1959705051', # dendrite
#'NIFQUAL:sao2088691397', # axon
#'NIFQUAL:sao1057800815', # morphological
#'NIFQUAL:sao-1126011106', # soma
#'NIFQUAL:',
#'NIFQUAL:',
#}
#"NIFQUAL:sao2088691397",
#"NIFQUAL:sao1278200674",
#"NIFQUAL:sao2088691397",
#"NIFQUAL:sao-1126011106", # FIXME WTF IS THIS NONSENSE (scigraph bug?)
starts = [quote('http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1959705051').replace('/', '%2F'), quote('http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao2088691397').replace('/', '%2F'), quote('http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1278200674').replace('/', '%2F'), quote('http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao2088691397').replace('/', '%2F'), quote('http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao-1126011106').replace('/', '%2F')]
for id_ in starts:
want = sgg.getNeighbors(id_, relationshipType='subClassOf', direction='INCOMING', depth=5)
#print(id_, want)
desired_nif_terms.update([n['id'] for n in want['nodes']]) # depends on [control=['for'], data=['id_']]
print(desired_nif_terms)
ilx_start = 50114
print(ilx_base.format(ilx_start))
new_terms = {}
dg = makeGraph('uwotm8', prefixes=PREFIXES)
xr = makeGraph('xrefs', prefixes=PREFIXES)
for (s, o) in sorted(ng.subject_objects(rdflib.RDFS.label))[::-1]:
spre = ng.namespace_manager.compute_qname(s)[1]
#if spre.toPython() == g.namespaces['NIFQUAL']:
#print('skipping', s)
#continue # TODO
if s in new_terms:
print(s, 'already in as xref probably')
continue # depends on [control=['if'], data=['s']]
#elif spre.toPython() != 'http://uri.interlex.org/base/ilx_' or spre.toPython() != 'http://FIXME.org/' and s.toPython() not in desired_nif_terms:
#elif spre.toPython() != 'http://FIXME.org/' and s.toPython() not in desired_nif_terms:
#print('DO NOT WANT', s, spre)
#continue
syns = set([s for s in ng.objects(s, dg.namespaces['nsu']['synonym'])])
#data['syns'] += syns
data = {}
id_ = ilx_base.format(ilx_start)
ilx_start += 1
if s in s2:
d = s2[s]
syns.update(d['syns'])
new_terms[d['xrefs'][0]] = {'replaced_by': id_}
xr.add_trip(d['xrefs'][0], 'oboInOwl:replacedBy', id_)
#dg.add_trip(d['xrefs'][0], 'oboInOwl:replacedBy', id_)
new_terms[d['xrefs'][1]] = {'replaced_by': id_}
xr.add_trip(d['xrefs'][1], 'oboInOwl:replacedBy', id_)
#dg.add_trip(d['xrefs'][1], 'oboInOwl:replacedBy', id_)
data['labels'] = [d['label'], d['o']]
#dg.add_trip(id_, rdflib.RDFS.label, d['label'])
dg.add_trip(id_, rdflib.RDFS.label, d['o'])
data['xrefs'] = d['xrefs']
for x in d['xrefs']: # FIXME... expecting order of evaluation errors here...
dg.add_trip(id_, 'oboInOwl:hasDbXref', x) # xr
xr.add_trip(id_, 'oboInOwl:hasDbXref', x) # x # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=['s', 's2']]
elif spre.toPython() != 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#' or ng.namespace_manager.qname(s).replace('default1', 'NIFQUAL') in desired_nif_terms: # skip non-xref quals
#print(ng.namespace_manager.qname(s).replace('default1','NIFQUAL'))
new_terms[s] = {'replaced_by': id_}
xr.add_trip(s, 'oboInOwl:replacedBy', id_)
data['labels'] = [o.toPython()]
dg.add_trip(id_, rdflib.RDFS.label, o.toPython())
data['xrefs'] = [s]
dg.add_trip(id_, 'oboInOwl:hasDbXref', s) # xr
xr.add_trip(id_, 'oboInOwl:hasDbXref', s) # xr # depends on [control=['if'], data=[]]
else:
ilx_start -= 1
continue
new_terms[id_] = data
dg.add_trip(id_, rdflib.RDF.type, rdflib.OWL.Class)
xr.add_trip(id_, rdflib.RDF.type, rdflib.OWL.Class)
for syn in syns:
if syn.toPython() not in data['labels']:
if len(syn) > 3:
dg.add_trip(id_, 'NIFRID:synonym', syn) # depends on [control=['if'], data=[]]
elif syn:
dg.add_trip(id_, 'NIFRID:abbrev', syn) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['syn']]
if 'EPHYS' in s or any(['EPHYS' in x for x in data['xrefs']]):
dg.add_trip(id_, rdflib.RDFS.subClassOf, ephys_phenotype) # depends on [control=['if'], data=[]]
elif 'MORPHOLOGY' in s or any(['MORPHOLOGY' in x for x in data['xrefs']]):
dg.add_trip(id_, rdflib.RDFS.subClassOf, morpho_phenotype) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
#dg.write(convert=False)
xr.write(convert=False)
#skip this for now, we can use DG to do lookups later
#for t in dg.g.triples((None, None, None)):
#g.add_trip(*t) # only way to clean prefixes :/
add_phenotypes(g)
g.write(convert=False)
g2 = makeGraph('pheno-comp', PREFIXES)
for t in ng.triples((None, None, None)):
g2.add_trip(*t) # only way to clean prefixes :/ # depends on [control=['for'], data=['t']]
g2.write(convert=False)
syn_mappings = {}
for (sub, syn) in [_ for _ in g.g.subject_objects(g.expand('NIFRID:synonym'))] + [_ for _ in g.g.subject_objects(rdflib.RDFS.label)]:
syn = syn.toPython()
if syn in syn_mappings:
log.error(f'duplicate synonym! {syn} {sub}') # depends on [control=['if'], data=['syn']]
syn_mappings[syn] = sub # depends on [control=['for'], data=[]]
#embed()
return (syn_mappings, pedges, ilx_start) |
def _prior_running_states(jid):
'''
Return a list of dicts of prior calls to state functions. This function is
used to queue state calls so only one is run at a time.
'''
ret = []
active = __salt__['saltutil.is_running']('state.*')
for data in active:
try:
data_jid = int(data['jid'])
except ValueError:
continue
if data_jid < int(jid):
ret.append(data)
return ret | def function[_prior_running_states, parameter[jid]]:
constant[
Return a list of dicts of prior calls to state functions. This function is
used to queue state calls so only one is run at a time.
]
variable[ret] assign[=] list[[]]
variable[active] assign[=] call[call[name[__salt__]][constant[saltutil.is_running]], parameter[constant[state.*]]]
for taget[name[data]] in starred[name[active]] begin[:]
<ast.Try object at 0x7da1b21373d0>
if compare[name[data_jid] less[<] call[name[int], parameter[name[jid]]]] begin[:]
call[name[ret].append, parameter[name[data]]]
return[name[ret]] | keyword[def] identifier[_prior_running_states] ( identifier[jid] ):
literal[string]
identifier[ret] =[]
identifier[active] = identifier[__salt__] [ literal[string] ]( literal[string] )
keyword[for] identifier[data] keyword[in] identifier[active] :
keyword[try] :
identifier[data_jid] = identifier[int] ( identifier[data] [ literal[string] ])
keyword[except] identifier[ValueError] :
keyword[continue]
keyword[if] identifier[data_jid] < identifier[int] ( identifier[jid] ):
identifier[ret] . identifier[append] ( identifier[data] )
keyword[return] identifier[ret] | def _prior_running_states(jid):
"""
Return a list of dicts of prior calls to state functions. This function is
used to queue state calls so only one is run at a time.
"""
ret = []
active = __salt__['saltutil.is_running']('state.*')
for data in active:
try:
data_jid = int(data['jid']) # depends on [control=['try'], data=[]]
except ValueError:
continue # depends on [control=['except'], data=[]]
if data_jid < int(jid):
ret.append(data) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['data']]
return ret |
def create_in_cluster(self):
"""
call Kubernetes API and create this Service in cluster,
raise ConuExeption if the API call fails
:return: None
"""
try:
self.api.create_namespaced_service(self.namespace, self.body)
except ApiException as e:
raise ConuException(
"Exception when calling Kubernetes API - create_namespaced_service: {}\n".format(e))
logger.info(
"Creating Service %s in namespace: %s", self.name, self.namespace) | def function[create_in_cluster, parameter[self]]:
constant[
call Kubernetes API and create this Service in cluster,
raise ConuExeption if the API call fails
:return: None
]
<ast.Try object at 0x7da1b1172170>
call[name[logger].info, parameter[constant[Creating Service %s in namespace: %s], name[self].name, name[self].namespace]] | keyword[def] identifier[create_in_cluster] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[api] . identifier[create_namespaced_service] ( identifier[self] . identifier[namespace] , identifier[self] . identifier[body] )
keyword[except] identifier[ApiException] keyword[as] identifier[e] :
keyword[raise] identifier[ConuException] (
literal[string] . identifier[format] ( identifier[e] ))
identifier[logger] . identifier[info] (
literal[string] , identifier[self] . identifier[name] , identifier[self] . identifier[namespace] ) | def create_in_cluster(self):
"""
call Kubernetes API and create this Service in cluster,
raise ConuExeption if the API call fails
:return: None
"""
try:
self.api.create_namespaced_service(self.namespace, self.body) # depends on [control=['try'], data=[]]
except ApiException as e:
raise ConuException('Exception when calling Kubernetes API - create_namespaced_service: {}\n'.format(e)) # depends on [control=['except'], data=['e']]
logger.info('Creating Service %s in namespace: %s', self.name, self.namespace) |
def randn_ktensor(shape, rank, norm=None, random_state=None):
"""
Generates a random N-way tensor with rank R, where the entries are
drawn from the standard normal distribution.
Parameters
----------
shape : tuple
shape of the tensor
rank : integer
rank of the tensor
norm : float or None, optional (defaults: None)
If not None, the factor matrices are rescaled so that the Frobenius
norm of the returned tensor is equal to ``norm``.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
Returns
-------
X : (I_1, ..., I_N) array_like
N-way tensor with rank R.
Example
-------
>>> # Create a rank-2 tensor of dimension 5x5x5:
>>> import tensortools as tt
>>> X = tt.randn_tensor((5,5,5), rank=2)
"""
# Check input.
rns = _check_random_state(random_state)
# Draw low-rank factor matrices with i.i.d. Gaussian elements.
factors = KTensor([rns.standard_normal((i, rank)) for i in shape])
return _rescale_tensor(factors, norm) | def function[randn_ktensor, parameter[shape, rank, norm, random_state]]:
constant[
Generates a random N-way tensor with rank R, where the entries are
drawn from the standard normal distribution.
Parameters
----------
shape : tuple
shape of the tensor
rank : integer
rank of the tensor
norm : float or None, optional (defaults: None)
If not None, the factor matrices are rescaled so that the Frobenius
norm of the returned tensor is equal to ``norm``.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
Returns
-------
X : (I_1, ..., I_N) array_like
N-way tensor with rank R.
Example
-------
>>> # Create a rank-2 tensor of dimension 5x5x5:
>>> import tensortools as tt
>>> X = tt.randn_tensor((5,5,5), rank=2)
]
variable[rns] assign[=] call[name[_check_random_state], parameter[name[random_state]]]
variable[factors] assign[=] call[name[KTensor], parameter[<ast.ListComp object at 0x7da2044c30a0>]]
return[call[name[_rescale_tensor], parameter[name[factors], name[norm]]]] | keyword[def] identifier[randn_ktensor] ( identifier[shape] , identifier[rank] , identifier[norm] = keyword[None] , identifier[random_state] = keyword[None] ):
literal[string]
identifier[rns] = identifier[_check_random_state] ( identifier[random_state] )
identifier[factors] = identifier[KTensor] ([ identifier[rns] . identifier[standard_normal] (( identifier[i] , identifier[rank] )) keyword[for] identifier[i] keyword[in] identifier[shape] ])
keyword[return] identifier[_rescale_tensor] ( identifier[factors] , identifier[norm] ) | def randn_ktensor(shape, rank, norm=None, random_state=None):
"""
Generates a random N-way tensor with rank R, where the entries are
drawn from the standard normal distribution.
Parameters
----------
shape : tuple
shape of the tensor
rank : integer
rank of the tensor
norm : float or None, optional (defaults: None)
If not None, the factor matrices are rescaled so that the Frobenius
norm of the returned tensor is equal to ``norm``.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
Returns
-------
X : (I_1, ..., I_N) array_like
N-way tensor with rank R.
Example
-------
>>> # Create a rank-2 tensor of dimension 5x5x5:
>>> import tensortools as tt
>>> X = tt.randn_tensor((5,5,5), rank=2)
"""
# Check input.
rns = _check_random_state(random_state)
# Draw low-rank factor matrices with i.i.d. Gaussian elements.
factors = KTensor([rns.standard_normal((i, rank)) for i in shape])
return _rescale_tensor(factors, norm) |
def read_file_bytes(input_file_path):
"""
Read the file at the given file path
and return its contents as a byte string,
or ``None`` if an error occurred.
:param string input_file_path: the file path
:rtype: bytes
"""
contents = None
try:
with io.open(input_file_path, "rb") as input_file:
contents = input_file.read()
except:
pass
return contents | def function[read_file_bytes, parameter[input_file_path]]:
constant[
Read the file at the given file path
and return its contents as a byte string,
or ``None`` if an error occurred.
:param string input_file_path: the file path
:rtype: bytes
]
variable[contents] assign[=] constant[None]
<ast.Try object at 0x7da1b1880b20>
return[name[contents]] | keyword[def] identifier[read_file_bytes] ( identifier[input_file_path] ):
literal[string]
identifier[contents] = keyword[None]
keyword[try] :
keyword[with] identifier[io] . identifier[open] ( identifier[input_file_path] , literal[string] ) keyword[as] identifier[input_file] :
identifier[contents] = identifier[input_file] . identifier[read] ()
keyword[except] :
keyword[pass]
keyword[return] identifier[contents] | def read_file_bytes(input_file_path):
"""
Read the file at the given file path
and return its contents as a byte string,
or ``None`` if an error occurred.
:param string input_file_path: the file path
:rtype: bytes
"""
contents = None
try:
with io.open(input_file_path, 'rb') as input_file:
contents = input_file.read() # depends on [control=['with'], data=['input_file']] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
return contents |
def getpage(self, wiki_space, page_title):
""" Fetches a page object.
Returns None if the page does not exist or otherwise could not be fetched.
"""
try:
return self._api_entrypoint.getPage(self._session_token, wiki_space, page_title)
except XMLRPCError as e:
log.warning('Failed to fetch page %s: %s' % (page_title, e))
return None | def function[getpage, parameter[self, wiki_space, page_title]]:
constant[ Fetches a page object.
Returns None if the page does not exist or otherwise could not be fetched.
]
<ast.Try object at 0x7da1b22ba500> | keyword[def] identifier[getpage] ( identifier[self] , identifier[wiki_space] , identifier[page_title] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_api_entrypoint] . identifier[getPage] ( identifier[self] . identifier[_session_token] , identifier[wiki_space] , identifier[page_title] )
keyword[except] identifier[XMLRPCError] keyword[as] identifier[e] :
identifier[log] . identifier[warning] ( literal[string] %( identifier[page_title] , identifier[e] ))
keyword[return] keyword[None] | def getpage(self, wiki_space, page_title):
""" Fetches a page object.
Returns None if the page does not exist or otherwise could not be fetched.
"""
try:
return self._api_entrypoint.getPage(self._session_token, wiki_space, page_title) # depends on [control=['try'], data=[]]
except XMLRPCError as e:
log.warning('Failed to fetch page %s: %s' % (page_title, e))
return None # depends on [control=['except'], data=['e']] |
def connect_to_another_user(self, user, password, token=None, is_public=False):
"""
Authenticates user with the same tenant as current platform using and returns new platform to user.
:rtype: QubellPlatform
:param str user: user email
:param str password: user password
:param str token: session token
:param bool is_public: either to use public or private api (public is not fully supported use with caution)
:return: New Platform instance
"""
return QubellPlatform.connect(self._router.base_url, user, password, token, is_public) | def function[connect_to_another_user, parameter[self, user, password, token, is_public]]:
constant[
Authenticates user with the same tenant as current platform using and returns new platform to user.
:rtype: QubellPlatform
:param str user: user email
:param str password: user password
:param str token: session token
:param bool is_public: either to use public or private api (public is not fully supported use with caution)
:return: New Platform instance
]
return[call[name[QubellPlatform].connect, parameter[name[self]._router.base_url, name[user], name[password], name[token], name[is_public]]]] | keyword[def] identifier[connect_to_another_user] ( identifier[self] , identifier[user] , identifier[password] , identifier[token] = keyword[None] , identifier[is_public] = keyword[False] ):
literal[string]
keyword[return] identifier[QubellPlatform] . identifier[connect] ( identifier[self] . identifier[_router] . identifier[base_url] , identifier[user] , identifier[password] , identifier[token] , identifier[is_public] ) | def connect_to_another_user(self, user, password, token=None, is_public=False):
"""
Authenticates user with the same tenant as current platform using and returns new platform to user.
:rtype: QubellPlatform
:param str user: user email
:param str password: user password
:param str token: session token
:param bool is_public: either to use public or private api (public is not fully supported use with caution)
:return: New Platform instance
"""
return QubellPlatform.connect(self._router.base_url, user, password, token, is_public) |
def inbound_presence_filter(f):
"""
Register the decorated function as a service-level inbound presence filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
"""
if asyncio.iscoroutinefunction(f):
raise TypeError(
"inbound_presence_filter must not be a coroutine function"
)
add_handler_spec(
f,
HandlerSpec(
(_apply_inbound_presence_filter, ())
),
)
return f | def function[inbound_presence_filter, parameter[f]]:
constant[
Register the decorated function as a service-level inbound presence filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
]
if call[name[asyncio].iscoroutinefunction, parameter[name[f]]] begin[:]
<ast.Raise object at 0x7da1b2345570>
call[name[add_handler_spec], parameter[name[f], call[name[HandlerSpec], parameter[tuple[[<ast.Name object at 0x7da1b2345930>, <ast.Tuple object at 0x7da1b2345e70>]]]]]]
return[name[f]] | keyword[def] identifier[inbound_presence_filter] ( identifier[f] ):
literal[string]
keyword[if] identifier[asyncio] . identifier[iscoroutinefunction] ( identifier[f] ):
keyword[raise] identifier[TypeError] (
literal[string]
)
identifier[add_handler_spec] (
identifier[f] ,
identifier[HandlerSpec] (
( identifier[_apply_inbound_presence_filter] ,())
),
)
keyword[return] identifier[f] | def inbound_presence_filter(f):
"""
Register the decorated function as a service-level inbound presence filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
"""
if asyncio.iscoroutinefunction(f):
raise TypeError('inbound_presence_filter must not be a coroutine function') # depends on [control=['if'], data=[]]
add_handler_spec(f, HandlerSpec((_apply_inbound_presence_filter, ())))
return f |
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad | def function[init_streams_pad, parameter[self, start_row]]:
constant[ Create a curses pad and populate it with a line by stream ]
variable[y] assign[=] constant[0]
variable[pad] assign[=] call[name[curses].newpad, parameter[call[name[max], parameter[constant[1], call[name[len], parameter[name[self].filtered_streams]]]], name[self].pad_w]]
call[name[pad].keypad, parameter[constant[1]]]
for taget[name[s]] in starred[name[self].filtered_streams] begin[:]
call[name[pad].addstr, parameter[name[y], constant[0], call[name[self].format_stream_line, parameter[name[s]]]]]
<ast.AugAssign object at 0x7da18bc72830>
call[name[self].offsets][constant[streams]] assign[=] constant[0]
call[name[pad].move, parameter[name[start_row], constant[0]]]
if <ast.UnaryOp object at 0x7da20c6a8910> begin[:]
call[name[pad].chgat, parameter[name[curses].A_REVERSE]]
call[name[self].pads][constant[streams]] assign[=] name[pad] | keyword[def] identifier[init_streams_pad] ( identifier[self] , identifier[start_row] = literal[int] ):
literal[string]
identifier[y] = literal[int]
identifier[pad] = identifier[curses] . identifier[newpad] ( identifier[max] ( literal[int] , identifier[len] ( identifier[self] . identifier[filtered_streams] )), identifier[self] . identifier[pad_w] )
identifier[pad] . identifier[keypad] ( literal[int] )
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[filtered_streams] :
identifier[pad] . identifier[addstr] ( identifier[y] , literal[int] , identifier[self] . identifier[format_stream_line] ( identifier[s] ))
identifier[y] += literal[int]
identifier[self] . identifier[offsets] [ literal[string] ]= literal[int]
identifier[pad] . identifier[move] ( identifier[start_row] , literal[int] )
keyword[if] keyword[not] identifier[self] . identifier[no_stream_shown] :
identifier[pad] . identifier[chgat] ( identifier[curses] . identifier[A_REVERSE] )
identifier[self] . identifier[pads] [ literal[string] ]= identifier[pad] | def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1, len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y += 1 # depends on [control=['for'], data=['s']]
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE) # depends on [control=['if'], data=[]]
self.pads['streams'] = pad |
def register_applications(self, applications, models=None, backends=None):
'''A higher level registration functions for group of models located
on application modules.
It uses the :func:`model_iterator` function to iterate
through all :class:`Model` models available in ``applications``
and register them using the :func:`register` low level method.
:parameter applications: A String or a list of strings representing
python dotted paths where models are implemented.
:parameter models: Optional list of models to include. If not provided
all models found in *applications* will be included.
:parameter backends: optional dictionary which map a model or an
application to a backend :ref:`connection string <connection-string>`.
:rtype: A list of registered :class:`Model`.
For example::
mapper.register_application_models('mylib.myapp')
mapper.register_application_models(['mylib.myapp', 'another.path'])
mapper.register_application_models(pythonmodule)
mapper.register_application_models(['mylib.myapp',pythonmodule])
'''
return list(self._register_applications(applications, models,
backends)) | def function[register_applications, parameter[self, applications, models, backends]]:
constant[A higher level registration functions for group of models located
on application modules.
It uses the :func:`model_iterator` function to iterate
through all :class:`Model` models available in ``applications``
and register them using the :func:`register` low level method.
:parameter applications: A String or a list of strings representing
python dotted paths where models are implemented.
:parameter models: Optional list of models to include. If not provided
all models found in *applications* will be included.
:parameter backends: optional dictionary which map a model or an
application to a backend :ref:`connection string <connection-string>`.
:rtype: A list of registered :class:`Model`.
For example::
mapper.register_application_models('mylib.myapp')
mapper.register_application_models(['mylib.myapp', 'another.path'])
mapper.register_application_models(pythonmodule)
mapper.register_application_models(['mylib.myapp',pythonmodule])
]
return[call[name[list], parameter[call[name[self]._register_applications, parameter[name[applications], name[models], name[backends]]]]]] | keyword[def] identifier[register_applications] ( identifier[self] , identifier[applications] , identifier[models] = keyword[None] , identifier[backends] = keyword[None] ):
literal[string]
keyword[return] identifier[list] ( identifier[self] . identifier[_register_applications] ( identifier[applications] , identifier[models] ,
identifier[backends] )) | def register_applications(self, applications, models=None, backends=None):
"""A higher level registration functions for group of models located
on application modules.
It uses the :func:`model_iterator` function to iterate
through all :class:`Model` models available in ``applications``
and register them using the :func:`register` low level method.
:parameter applications: A String or a list of strings representing
python dotted paths where models are implemented.
:parameter models: Optional list of models to include. If not provided
all models found in *applications* will be included.
:parameter backends: optional dictionary which map a model or an
application to a backend :ref:`connection string <connection-string>`.
:rtype: A list of registered :class:`Model`.
For example::
mapper.register_application_models('mylib.myapp')
mapper.register_application_models(['mylib.myapp', 'another.path'])
mapper.register_application_models(pythonmodule)
mapper.register_application_models(['mylib.myapp',pythonmodule])
"""
return list(self._register_applications(applications, models, backends)) |
def predict(self, testing_features):
"""predict on a holdout data set."""
# print("best_inds:",self._best_inds)
# print("best estimator size:",self._best_estimator.coef_.shape)
if self.clean:
testing_features = self.impute_data(testing_features)
if self._best_inds:
X_transform = self.transform(testing_features)
try:
return self._best_estimator.predict(self.transform(testing_features))
except ValueError as detail:
# pdb.set_trace()
print('shape of X:',testing_features.shape)
print('shape of X_transform:',X_transform.transpose().shape)
print('best inds:',self.stacks_2_eqns(self._best_inds))
print('valid locs:',self.valid_loc(self._best_inds))
raise ValueError(detail)
else:
return self._best_estimator.predict(testing_features) | def function[predict, parameter[self, testing_features]]:
constant[predict on a holdout data set.]
if name[self].clean begin[:]
variable[testing_features] assign[=] call[name[self].impute_data, parameter[name[testing_features]]]
if name[self]._best_inds begin[:]
variable[X_transform] assign[=] call[name[self].transform, parameter[name[testing_features]]]
<ast.Try object at 0x7da18eb54190> | keyword[def] identifier[predict] ( identifier[self] , identifier[testing_features] ):
literal[string]
keyword[if] identifier[self] . identifier[clean] :
identifier[testing_features] = identifier[self] . identifier[impute_data] ( identifier[testing_features] )
keyword[if] identifier[self] . identifier[_best_inds] :
identifier[X_transform] = identifier[self] . identifier[transform] ( identifier[testing_features] )
keyword[try] :
keyword[return] identifier[self] . identifier[_best_estimator] . identifier[predict] ( identifier[self] . identifier[transform] ( identifier[testing_features] ))
keyword[except] identifier[ValueError] keyword[as] identifier[detail] :
identifier[print] ( literal[string] , identifier[testing_features] . identifier[shape] )
identifier[print] ( literal[string] , identifier[X_transform] . identifier[transpose] (). identifier[shape] )
identifier[print] ( literal[string] , identifier[self] . identifier[stacks_2_eqns] ( identifier[self] . identifier[_best_inds] ))
identifier[print] ( literal[string] , identifier[self] . identifier[valid_loc] ( identifier[self] . identifier[_best_inds] ))
keyword[raise] identifier[ValueError] ( identifier[detail] )
keyword[else] :
keyword[return] identifier[self] . identifier[_best_estimator] . identifier[predict] ( identifier[testing_features] ) | def predict(self, testing_features):
"""predict on a holdout data set."""
# print("best_inds:",self._best_inds)
# print("best estimator size:",self._best_estimator.coef_.shape)
if self.clean:
testing_features = self.impute_data(testing_features) # depends on [control=['if'], data=[]]
if self._best_inds:
X_transform = self.transform(testing_features)
try:
return self._best_estimator.predict(self.transform(testing_features)) # depends on [control=['try'], data=[]]
except ValueError as detail:
# pdb.set_trace()
print('shape of X:', testing_features.shape)
print('shape of X_transform:', X_transform.transpose().shape)
print('best inds:', self.stacks_2_eqns(self._best_inds))
print('valid locs:', self.valid_loc(self._best_inds))
raise ValueError(detail) # depends on [control=['except'], data=['detail']] # depends on [control=['if'], data=[]]
else:
return self._best_estimator.predict(testing_features) |
def _input_stmt(self, stmt: object) -> tuple:
"""
takes the input key from kwargs and processes it to aid in the generation of a model statement
:param stmt: str, list, or dict that contains the model information.
:return: tuple of strings one for the class statement one for the model statements
"""
code = ''
cls = ''
if isinstance(stmt, str):
code += "%s " % (stmt)
elif isinstance(stmt, dict):
try:
if 'interval' in stmt.keys():
if isinstance(stmt['interval'], str):
code += "%s " % stmt['interval']
if isinstance(stmt['interval'], list):
code += "%s " % " ".join(stmt['interval'])
if 'nominal' in stmt.keys():
if isinstance(stmt['nominal'], str):
code += "%s " % stmt['nominal']
cls += "%s " % stmt['nominal']
if isinstance(stmt['nominal'], list):
code += "%s " % " ".join(stmt['nominal'])
cls += "%s " % " ".join(stmt['nominal'])
except:
raise SyntaxError("Proper Keys not found for INPUT dictionary: %s" % stmt.keys())
elif isinstance(stmt, list):
if len(stmt) == 1:
code += "%s" % str(stmt[0])
elif len(stmt) > 1:
code += "%s" % " ".join(stmt)
else:
raise SyntaxError("The input list has no members")
else:
raise SyntaxError("INPUT is in an unknown format: %s" % str(stmt))
return (code, cls) | def function[_input_stmt, parameter[self, stmt]]:
constant[
takes the input key from kwargs and processes it to aid in the generation of a model statement
:param stmt: str, list, or dict that contains the model information.
:return: tuple of strings one for the class statement one for the model statements
]
variable[code] assign[=] constant[]
variable[cls] assign[=] constant[]
if call[name[isinstance], parameter[name[stmt], name[str]]] begin[:]
<ast.AugAssign object at 0x7da2044c24d0>
return[tuple[[<ast.Name object at 0x7da207f03b50>, <ast.Name object at 0x7da207f020b0>]]] | keyword[def] identifier[_input_stmt] ( identifier[self] , identifier[stmt] : identifier[object] )-> identifier[tuple] :
literal[string]
identifier[code] = literal[string]
identifier[cls] = literal[string]
keyword[if] identifier[isinstance] ( identifier[stmt] , identifier[str] ):
identifier[code] += literal[string] %( identifier[stmt] )
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[dict] ):
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[stmt] . identifier[keys] ():
keyword[if] identifier[isinstance] ( identifier[stmt] [ literal[string] ], identifier[str] ):
identifier[code] += literal[string] % identifier[stmt] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[stmt] [ literal[string] ], identifier[list] ):
identifier[code] += literal[string] % literal[string] . identifier[join] ( identifier[stmt] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[stmt] . identifier[keys] ():
keyword[if] identifier[isinstance] ( identifier[stmt] [ literal[string] ], identifier[str] ):
identifier[code] += literal[string] % identifier[stmt] [ literal[string] ]
identifier[cls] += literal[string] % identifier[stmt] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[stmt] [ literal[string] ], identifier[list] ):
identifier[code] += literal[string] % literal[string] . identifier[join] ( identifier[stmt] [ literal[string] ])
identifier[cls] += literal[string] % literal[string] . identifier[join] ( identifier[stmt] [ literal[string] ])
keyword[except] :
keyword[raise] identifier[SyntaxError] ( literal[string] % identifier[stmt] . identifier[keys] ())
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[list] ):
keyword[if] identifier[len] ( identifier[stmt] )== literal[int] :
identifier[code] += literal[string] % identifier[str] ( identifier[stmt] [ literal[int] ])
keyword[elif] identifier[len] ( identifier[stmt] )> literal[int] :
identifier[code] += literal[string] % literal[string] . identifier[join] ( identifier[stmt] )
keyword[else] :
keyword[raise] identifier[SyntaxError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[SyntaxError] ( literal[string] % identifier[str] ( identifier[stmt] ))
keyword[return] ( identifier[code] , identifier[cls] ) | def _input_stmt(self, stmt: object) -> tuple:
"""
takes the input key from kwargs and processes it to aid in the generation of a model statement
:param stmt: str, list, or dict that contains the model information.
:return: tuple of strings one for the class statement one for the model statements
"""
code = ''
cls = ''
if isinstance(stmt, str):
code += '%s ' % stmt # depends on [control=['if'], data=[]]
elif isinstance(stmt, dict):
try:
if 'interval' in stmt.keys():
if isinstance(stmt['interval'], str):
code += '%s ' % stmt['interval'] # depends on [control=['if'], data=[]]
if isinstance(stmt['interval'], list):
code += '%s ' % ' '.join(stmt['interval']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'nominal' in stmt.keys():
if isinstance(stmt['nominal'], str):
code += '%s ' % stmt['nominal']
cls += '%s ' % stmt['nominal'] # depends on [control=['if'], data=[]]
if isinstance(stmt['nominal'], list):
code += '%s ' % ' '.join(stmt['nominal'])
cls += '%s ' % ' '.join(stmt['nominal']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
raise SyntaxError('Proper Keys not found for INPUT dictionary: %s' % stmt.keys()) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(stmt, list):
if len(stmt) == 1:
code += '%s' % str(stmt[0]) # depends on [control=['if'], data=[]]
elif len(stmt) > 1:
code += '%s' % ' '.join(stmt) # depends on [control=['if'], data=[]]
else:
raise SyntaxError('The input list has no members') # depends on [control=['if'], data=[]]
else:
raise SyntaxError('INPUT is in an unknown format: %s' % str(stmt))
return (code, cls) |
def download(self,
files=None,
destination=None,
overwrite=False,
callback=None):
"""Download file or files.
:param files: file or files to download
:param destination: destination path (defaults to users home directory)
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type files: ``list`` of ``dict`` with file data from filemail
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func``
"""
if files is None:
files = self.files
elif not isinstance(files, list):
files = [files]
if destination is None:
destination = os.path.expanduser('~')
for f in files:
if not isinstance(f, dict):
raise FMBaseError('File must be a <dict> with file data')
self._download(f, destination, overwrite, callback) | def function[download, parameter[self, files, destination, overwrite, callback]]:
constant[Download file or files.
:param files: file or files to download
:param destination: destination path (defaults to users home directory)
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type files: ``list`` of ``dict`` with file data from filemail
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func``
]
if compare[name[files] is constant[None]] begin[:]
variable[files] assign[=] name[self].files
if compare[name[destination] is constant[None]] begin[:]
variable[destination] assign[=] call[name[os].path.expanduser, parameter[constant[~]]]
for taget[name[f]] in starred[name[files]] begin[:]
if <ast.UnaryOp object at 0x7da20c6aa0b0> begin[:]
<ast.Raise object at 0x7da20c6abca0>
call[name[self]._download, parameter[name[f], name[destination], name[overwrite], name[callback]]] | keyword[def] identifier[download] ( identifier[self] ,
identifier[files] = keyword[None] ,
identifier[destination] = keyword[None] ,
identifier[overwrite] = keyword[False] ,
identifier[callback] = keyword[None] ):
literal[string]
keyword[if] identifier[files] keyword[is] keyword[None] :
identifier[files] = identifier[self] . identifier[files]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[files] , identifier[list] ):
identifier[files] =[ identifier[files] ]
keyword[if] identifier[destination] keyword[is] keyword[None] :
identifier[destination] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[for] identifier[f] keyword[in] identifier[files] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[f] , identifier[dict] ):
keyword[raise] identifier[FMBaseError] ( literal[string] )
identifier[self] . identifier[_download] ( identifier[f] , identifier[destination] , identifier[overwrite] , identifier[callback] ) | def download(self, files=None, destination=None, overwrite=False, callback=None):
"""Download file or files.
:param files: file or files to download
:param destination: destination path (defaults to users home directory)
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type files: ``list`` of ``dict`` with file data from filemail
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func``
"""
if files is None:
files = self.files # depends on [control=['if'], data=['files']]
elif not isinstance(files, list):
files = [files] # depends on [control=['if'], data=[]]
if destination is None:
destination = os.path.expanduser('~') # depends on [control=['if'], data=['destination']]
for f in files:
if not isinstance(f, dict):
raise FMBaseError('File must be a <dict> with file data') # depends on [control=['if'], data=[]]
self._download(f, destination, overwrite, callback) # depends on [control=['for'], data=['f']] |
def end_workunit(self, workunit):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
if workunit.outcome() != WorkUnit.SUCCESS and not self._show_output(workunit):
# Emit the suppressed workunit output, if any, to aid in debugging the problem.
if self._get_label_format(workunit) != LabelFormat.FULL:
self._emit_indented_workunit_label(workunit)
for name, outbuf in workunit.outputs().items():
self.emit(self._prefix(workunit, '\n==== {} ====\n'.format(name)))
self.emit(self._prefix(workunit, outbuf.read_from(0).decode('utf-8')))
self.flush() | def function[end_workunit, parameter[self, workunit]]:
constant[Implementation of Reporter callback.]
if <ast.UnaryOp object at 0x7da1b2291090> begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b2290100> begin[:]
if compare[call[name[self]._get_label_format, parameter[name[workunit]]] not_equal[!=] name[LabelFormat].FULL] begin[:]
call[name[self]._emit_indented_workunit_label, parameter[name[workunit]]]
for taget[tuple[[<ast.Name object at 0x7da1b2291060>, <ast.Name object at 0x7da1b2291000>]]] in starred[call[call[name[workunit].outputs, parameter[]].items, parameter[]]] begin[:]
call[name[self].emit, parameter[call[name[self]._prefix, parameter[name[workunit], call[constant[
==== {} ====
].format, parameter[name[name]]]]]]]
call[name[self].emit, parameter[call[name[self]._prefix, parameter[name[workunit], call[call[name[outbuf].read_from, parameter[constant[0]]].decode, parameter[constant[utf-8]]]]]]]
call[name[self].flush, parameter[]] | keyword[def] identifier[end_workunit] ( identifier[self] , identifier[workunit] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_under_main_root] ( identifier[workunit] ):
keyword[return]
keyword[if] identifier[workunit] . identifier[outcome] ()!= identifier[WorkUnit] . identifier[SUCCESS] keyword[and] keyword[not] identifier[self] . identifier[_show_output] ( identifier[workunit] ):
keyword[if] identifier[self] . identifier[_get_label_format] ( identifier[workunit] )!= identifier[LabelFormat] . identifier[FULL] :
identifier[self] . identifier[_emit_indented_workunit_label] ( identifier[workunit] )
keyword[for] identifier[name] , identifier[outbuf] keyword[in] identifier[workunit] . identifier[outputs] (). identifier[items] ():
identifier[self] . identifier[emit] ( identifier[self] . identifier[_prefix] ( identifier[workunit] , literal[string] . identifier[format] ( identifier[name] )))
identifier[self] . identifier[emit] ( identifier[self] . identifier[_prefix] ( identifier[workunit] , identifier[outbuf] . identifier[read_from] ( literal[int] ). identifier[decode] ( literal[string] )))
identifier[self] . identifier[flush] () | def end_workunit(self, workunit):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return # depends on [control=['if'], data=[]]
if workunit.outcome() != WorkUnit.SUCCESS and (not self._show_output(workunit)):
# Emit the suppressed workunit output, if any, to aid in debugging the problem.
if self._get_label_format(workunit) != LabelFormat.FULL:
self._emit_indented_workunit_label(workunit) # depends on [control=['if'], data=[]]
for (name, outbuf) in workunit.outputs().items():
self.emit(self._prefix(workunit, '\n==== {} ====\n'.format(name)))
self.emit(self._prefix(workunit, outbuf.read_from(0).decode('utf-8')))
self.flush() # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def delete_audit_sink(self, name, **kwargs):
"""
delete an AuditSink
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_audit_sink(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the AuditSink (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_audit_sink_with_http_info(name, **kwargs)
else:
(data) = self.delete_audit_sink_with_http_info(name, **kwargs)
return data | def function[delete_audit_sink, parameter[self, name]]:
constant[
delete an AuditSink
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_audit_sink(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the AuditSink (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].delete_audit_sink_with_http_info, parameter[name[name]]]] | keyword[def] identifier[delete_audit_sink] ( identifier[self] , identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[delete_audit_sink_with_http_info] ( identifier[name] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[delete_audit_sink_with_http_info] ( identifier[name] ,** identifier[kwargs] )
keyword[return] identifier[data] | def delete_audit_sink(self, name, **kwargs):
"""
delete an AuditSink
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_audit_sink(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the AuditSink (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_audit_sink_with_http_info(name, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.delete_audit_sink_with_http_info(name, **kwargs)
return data |
def to_float_with_default(value, default_value):
"""
Converts value into float or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: float value or default value when conversion is not supported.
"""
result = FloatConverter.to_nullable_float(value)
return result if result != None else default_value | def function[to_float_with_default, parameter[value, default_value]]:
constant[
Converts value into float or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: float value or default value when conversion is not supported.
]
variable[result] assign[=] call[name[FloatConverter].to_nullable_float, parameter[name[value]]]
return[<ast.IfExp object at 0x7da1b16050c0>] | keyword[def] identifier[to_float_with_default] ( identifier[value] , identifier[default_value] ):
literal[string]
identifier[result] = identifier[FloatConverter] . identifier[to_nullable_float] ( identifier[value] )
keyword[return] identifier[result] keyword[if] identifier[result] != keyword[None] keyword[else] identifier[default_value] | def to_float_with_default(value, default_value):
"""
Converts value into float or returns default when conversion is not possible.
:param value: the value to convert.
:param default_value: the default value.
:return: float value or default value when conversion is not supported.
"""
result = FloatConverter.to_nullable_float(value)
return result if result != None else default_value |
def make_clean_figure(figsize, remove_tooltips=False, remove_keybindings=False):
"""
Makes a `matplotlib.pyplot.Figure` without tooltips or keybindings
Parameters
----------
figsize : tuple
Figsize as passed to `matplotlib.pyplot.figure`
remove_tooltips, remove_keybindings : bool
Set to True to remove the tooltips bar or any key bindings,
respectively. Default is False
Returns
-------
fig : `matplotlib.pyplot.Figure`
"""
tooltip = mpl.rcParams['toolbar']
if remove_tooltips:
mpl.rcParams['toolbar'] = 'None'
fig = pl.figure(figsize=figsize)
mpl.rcParams['toolbar'] = tooltip
if remove_keybindings:
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
return fig | def function[make_clean_figure, parameter[figsize, remove_tooltips, remove_keybindings]]:
constant[
Makes a `matplotlib.pyplot.Figure` without tooltips or keybindings
Parameters
----------
figsize : tuple
Figsize as passed to `matplotlib.pyplot.figure`
remove_tooltips, remove_keybindings : bool
Set to True to remove the tooltips bar or any key bindings,
respectively. Default is False
Returns
-------
fig : `matplotlib.pyplot.Figure`
]
variable[tooltip] assign[=] call[name[mpl].rcParams][constant[toolbar]]
if name[remove_tooltips] begin[:]
call[name[mpl].rcParams][constant[toolbar]] assign[=] constant[None]
variable[fig] assign[=] call[name[pl].figure, parameter[]]
call[name[mpl].rcParams][constant[toolbar]] assign[=] name[tooltip]
if name[remove_keybindings] begin[:]
call[name[fig].canvas.mpl_disconnect, parameter[name[fig].canvas.manager.key_press_handler_id]]
return[name[fig]] | keyword[def] identifier[make_clean_figure] ( identifier[figsize] , identifier[remove_tooltips] = keyword[False] , identifier[remove_keybindings] = keyword[False] ):
literal[string]
identifier[tooltip] = identifier[mpl] . identifier[rcParams] [ literal[string] ]
keyword[if] identifier[remove_tooltips] :
identifier[mpl] . identifier[rcParams] [ literal[string] ]= literal[string]
identifier[fig] = identifier[pl] . identifier[figure] ( identifier[figsize] = identifier[figsize] )
identifier[mpl] . identifier[rcParams] [ literal[string] ]= identifier[tooltip]
keyword[if] identifier[remove_keybindings] :
identifier[fig] . identifier[canvas] . identifier[mpl_disconnect] ( identifier[fig] . identifier[canvas] . identifier[manager] . identifier[key_press_handler_id] )
keyword[return] identifier[fig] | def make_clean_figure(figsize, remove_tooltips=False, remove_keybindings=False):
"""
Makes a `matplotlib.pyplot.Figure` without tooltips or keybindings
Parameters
----------
figsize : tuple
Figsize as passed to `matplotlib.pyplot.figure`
remove_tooltips, remove_keybindings : bool
Set to True to remove the tooltips bar or any key bindings,
respectively. Default is False
Returns
-------
fig : `matplotlib.pyplot.Figure`
"""
tooltip = mpl.rcParams['toolbar']
if remove_tooltips:
mpl.rcParams['toolbar'] = 'None' # depends on [control=['if'], data=[]]
fig = pl.figure(figsize=figsize)
mpl.rcParams['toolbar'] = tooltip
if remove_keybindings:
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id) # depends on [control=['if'], data=[]]
return fig |
def init_atom_feed(self, feed):
"""
Initializing an atom feed `feedgen.feed.FeedGenerator` given a feed object
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator`
"""
atom_feed = FeedGenerator()
atom_feed.id(id=self.request.route_url(self.get_atom_feed_url, id=feed.id))
atom_feed.link(href=self.request.route_url(self.get_atom_feed_url, id=feed.id), rel='self')
atom_feed.language('nl-BE')
self.link_to_sibling(feed, 'previous', atom_feed)
self.link_to_sibling(feed, 'next', atom_feed)
return atom_feed | def function[init_atom_feed, parameter[self, feed]]:
constant[
Initializing an atom feed `feedgen.feed.FeedGenerator` given a feed object
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator`
]
variable[atom_feed] assign[=] call[name[FeedGenerator], parameter[]]
call[name[atom_feed].id, parameter[]]
call[name[atom_feed].link, parameter[]]
call[name[atom_feed].language, parameter[constant[nl-BE]]]
call[name[self].link_to_sibling, parameter[name[feed], constant[previous], name[atom_feed]]]
call[name[self].link_to_sibling, parameter[name[feed], constant[next], name[atom_feed]]]
return[name[atom_feed]] | keyword[def] identifier[init_atom_feed] ( identifier[self] , identifier[feed] ):
literal[string]
identifier[atom_feed] = identifier[FeedGenerator] ()
identifier[atom_feed] . identifier[id] ( identifier[id] = identifier[self] . identifier[request] . identifier[route_url] ( identifier[self] . identifier[get_atom_feed_url] , identifier[id] = identifier[feed] . identifier[id] ))
identifier[atom_feed] . identifier[link] ( identifier[href] = identifier[self] . identifier[request] . identifier[route_url] ( identifier[self] . identifier[get_atom_feed_url] , identifier[id] = identifier[feed] . identifier[id] ), identifier[rel] = literal[string] )
identifier[atom_feed] . identifier[language] ( literal[string] )
identifier[self] . identifier[link_to_sibling] ( identifier[feed] , literal[string] , identifier[atom_feed] )
identifier[self] . identifier[link_to_sibling] ( identifier[feed] , literal[string] , identifier[atom_feed] )
keyword[return] identifier[atom_feed] | def init_atom_feed(self, feed):
"""
Initializing an atom feed `feedgen.feed.FeedGenerator` given a feed object
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator`
"""
atom_feed = FeedGenerator()
atom_feed.id(id=self.request.route_url(self.get_atom_feed_url, id=feed.id))
atom_feed.link(href=self.request.route_url(self.get_atom_feed_url, id=feed.id), rel='self')
atom_feed.language('nl-BE')
self.link_to_sibling(feed, 'previous', atom_feed)
self.link_to_sibling(feed, 'next', atom_feed)
return atom_feed |
def json(self):
"""
Output the security rules as a json string.
Return:
str
"""
return json.dumps(self.dict_rules,
sort_keys=True,
indent=2,
separators=(',', ': ')) | def function[json, parameter[self]]:
constant[
Output the security rules as a json string.
Return:
str
]
return[call[name[json].dumps, parameter[name[self].dict_rules]]] | keyword[def] identifier[json] ( identifier[self] ):
literal[string]
keyword[return] identifier[json] . identifier[dumps] ( identifier[self] . identifier[dict_rules] ,
identifier[sort_keys] = keyword[True] ,
identifier[indent] = literal[int] ,
identifier[separators] =( literal[string] , literal[string] )) | def json(self):
"""
Output the security rules as a json string.
Return:
str
"""
return json.dumps(self.dict_rules, sort_keys=True, indent=2, separators=(',', ': ')) |
def predict(self, X):
""" Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : array, shape (n_samples,)
For each observations, tells whether or not (True or False) it should
be considered as an inlier according to the fitted model.
"""
# Check is fit had been called
check_is_fitted(self, ['_x_min', '_x_max'])
# Input validation
X = check_array(X)
return ((X - self._x_min).min(axis=1) >= 0) & ((self._x_max - X).min(axis=1) >= 0) | def function[predict, parameter[self, X]]:
constant[ Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : array, shape (n_samples,)
For each observations, tells whether or not (True or False) it should
be considered as an inlier according to the fitted model.
]
call[name[check_is_fitted], parameter[name[self], list[[<ast.Constant object at 0x7da1b00fa380>, <ast.Constant object at 0x7da1b00f9030>]]]]
variable[X] assign[=] call[name[check_array], parameter[name[X]]]
return[binary_operation[compare[call[binary_operation[name[X] - name[self]._x_min].min, parameter[]] greater_or_equal[>=] constant[0]] <ast.BitAnd object at 0x7da2590d6b60> compare[call[binary_operation[name[self]._x_max - name[X]].min, parameter[]] greater_or_equal[>=] constant[0]]]] | keyword[def] identifier[predict] ( identifier[self] , identifier[X] ):
literal[string]
identifier[check_is_fitted] ( identifier[self] ,[ literal[string] , literal[string] ])
identifier[X] = identifier[check_array] ( identifier[X] )
keyword[return] (( identifier[X] - identifier[self] . identifier[_x_min] ). identifier[min] ( identifier[axis] = literal[int] )>= literal[int] )&(( identifier[self] . identifier[_x_max] - identifier[X] ). identifier[min] ( identifier[axis] = literal[int] )>= literal[int] ) | def predict(self, X):
""" Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : array, shape (n_samples,)
For each observations, tells whether or not (True or False) it should
be considered as an inlier according to the fitted model.
"""
# Check is fit had been called
check_is_fitted(self, ['_x_min', '_x_max'])
# Input validation
X = check_array(X)
return ((X - self._x_min).min(axis=1) >= 0) & ((self._x_max - X).min(axis=1) >= 0) |
def ports(self):
"""
:class:`~zhmcclient.PortManager`: Access to the :term:`Ports <Port>` of
this Adapter.
"""
# We do here some lazy loading.
if not self._ports:
family = self.get_property('adapter-family')
try:
port_type = self.port_type_by_family[family]
except KeyError:
port_type = None
self._ports = PortManager(self, port_type)
return self._ports | def function[ports, parameter[self]]:
constant[
:class:`~zhmcclient.PortManager`: Access to the :term:`Ports <Port>` of
this Adapter.
]
if <ast.UnaryOp object at 0x7da18f720dc0> begin[:]
variable[family] assign[=] call[name[self].get_property, parameter[constant[adapter-family]]]
<ast.Try object at 0x7da18f7210c0>
name[self]._ports assign[=] call[name[PortManager], parameter[name[self], name[port_type]]]
return[name[self]._ports] | keyword[def] identifier[ports] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_ports] :
identifier[family] = identifier[self] . identifier[get_property] ( literal[string] )
keyword[try] :
identifier[port_type] = identifier[self] . identifier[port_type_by_family] [ identifier[family] ]
keyword[except] identifier[KeyError] :
identifier[port_type] = keyword[None]
identifier[self] . identifier[_ports] = identifier[PortManager] ( identifier[self] , identifier[port_type] )
keyword[return] identifier[self] . identifier[_ports] | def ports(self):
"""
:class:`~zhmcclient.PortManager`: Access to the :term:`Ports <Port>` of
this Adapter.
"""
# We do here some lazy loading.
if not self._ports:
family = self.get_property('adapter-family')
try:
port_type = self.port_type_by_family[family] # depends on [control=['try'], data=[]]
except KeyError:
port_type = None # depends on [control=['except'], data=[]]
self._ports = PortManager(self, port_type) # depends on [control=['if'], data=[]]
return self._ports |
def validate_task(original_task):
"""
Validates task and adds default values for missing options using the
following steps.
1. If there is no input list specified or if it is None, the input spec is
assumed to be ['*'].
2. If there are not outputs specified, or if the output spec is None or an
empty list, the output spec is assumed to be ['*'].
3. If the input or output spec is not iterable, they are converted into
single element tuples. If they are any iterable, they are converted into
tuples.
4. The task['fn'] option must be callable.
5. If number of outputs is more than one, task['fn'] must be a generator
function.
6. Generator functions are not supported for output spec of '*'.
Returns new task with updated options
"""
task = original_task._asdict()
# Default values for inputs and outputs
if 'inputs' not in task or task['inputs'] is None:
task['inputs'] = ['*']
# Outputs list cannot be empty
if ('outputs' not in task or
task['outputs'] is None or
len(task['outputs']) == 0):
task['outputs'] = ['*']
# Convert to tuples (even for single values)
if not hasattr(task['inputs'], '__iter__') or isinstance(task['inputs'], str):
task['inputs'] = (task['inputs'],)
else:
task['inputs'] = tuple(task['inputs'])
if not hasattr(task['outputs'], '__iter__') or isinstance(task['outputs'], str):
task['outputs'] = (task['outputs'],)
else:
task['outputs'] = tuple(task['outputs'])
if not callable(task['fn']):
raise TypeError('Task function must be a callable object')
if (len(task['outputs']) > 1 and
not inspect.isgeneratorfunction(task['fn'])):
raise TypeError('Multiple outputs are only supported with \
generator functions')
if inspect.isgeneratorfunction(task['fn']):
if task['outputs'][0] == '*':
raise TypeError('Generator functions cannot be used for tasks with \
output specification "*"')
return Task(**task) | def function[validate_task, parameter[original_task]]:
constant[
Validates task and adds default values for missing options using the
following steps.
1. If there is no input list specified or if it is None, the input spec is
assumed to be ['*'].
2. If there are not outputs specified, or if the output spec is None or an
empty list, the output spec is assumed to be ['*'].
3. If the input or output spec is not iterable, they are converted into
single element tuples. If they are any iterable, they are converted into
tuples.
4. The task['fn'] option must be callable.
5. If number of outputs is more than one, task['fn'] must be a generator
function.
6. Generator functions are not supported for output spec of '*'.
Returns new task with updated options
]
variable[task] assign[=] call[name[original_task]._asdict, parameter[]]
if <ast.BoolOp object at 0x7da18dc9b490> begin[:]
call[name[task]][constant[inputs]] assign[=] list[[<ast.Constant object at 0x7da18dc998a0>]]
if <ast.BoolOp object at 0x7da18dc9a4a0> begin[:]
call[name[task]][constant[outputs]] assign[=] list[[<ast.Constant object at 0x7da18dc9a7a0>]]
if <ast.BoolOp object at 0x7da18dc9a290> begin[:]
call[name[task]][constant[inputs]] assign[=] tuple[[<ast.Subscript object at 0x7da20e962590>]]
if <ast.BoolOp object at 0x7da20e9b1780> begin[:]
call[name[task]][constant[outputs]] assign[=] tuple[[<ast.Subscript object at 0x7da20e9b34c0>]]
if <ast.UnaryOp object at 0x7da20c7c83a0> begin[:]
<ast.Raise object at 0x7da20c7cb700>
if <ast.BoolOp object at 0x7da20c7c9b40> begin[:]
<ast.Raise object at 0x7da20c7cb610>
if call[name[inspect].isgeneratorfunction, parameter[call[name[task]][constant[fn]]]] begin[:]
if compare[call[call[name[task]][constant[outputs]]][constant[0]] equal[==] constant[*]] begin[:]
<ast.Raise object at 0x7da20e955240>
return[call[name[Task], parameter[]]] | keyword[def] identifier[validate_task] ( identifier[original_task] ):
literal[string]
identifier[task] = identifier[original_task] . identifier[_asdict] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[task] keyword[or] identifier[task] [ literal[string] ] keyword[is] keyword[None] :
identifier[task] [ literal[string] ]=[ literal[string] ]
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[task] keyword[or]
identifier[task] [ literal[string] ] keyword[is] keyword[None] keyword[or]
identifier[len] ( identifier[task] [ literal[string] ])== literal[int] ):
identifier[task] [ literal[string] ]=[ literal[string] ]
keyword[if] keyword[not] identifier[hasattr] ( identifier[task] [ literal[string] ], literal[string] ) keyword[or] identifier[isinstance] ( identifier[task] [ literal[string] ], identifier[str] ):
identifier[task] [ literal[string] ]=( identifier[task] [ literal[string] ],)
keyword[else] :
identifier[task] [ literal[string] ]= identifier[tuple] ( identifier[task] [ literal[string] ])
keyword[if] keyword[not] identifier[hasattr] ( identifier[task] [ literal[string] ], literal[string] ) keyword[or] identifier[isinstance] ( identifier[task] [ literal[string] ], identifier[str] ):
identifier[task] [ literal[string] ]=( identifier[task] [ literal[string] ],)
keyword[else] :
identifier[task] [ literal[string] ]= identifier[tuple] ( identifier[task] [ literal[string] ])
keyword[if] keyword[not] identifier[callable] ( identifier[task] [ literal[string] ]):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] ( identifier[len] ( identifier[task] [ literal[string] ])> literal[int] keyword[and]
keyword[not] identifier[inspect] . identifier[isgeneratorfunction] ( identifier[task] [ literal[string] ])):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[inspect] . identifier[isgeneratorfunction] ( identifier[task] [ literal[string] ]):
keyword[if] identifier[task] [ literal[string] ][ literal[int] ]== literal[string] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[Task] (** identifier[task] ) | def validate_task(original_task):
"""
Validates task and adds default values for missing options using the
following steps.
1. If there is no input list specified or if it is None, the input spec is
assumed to be ['*'].
2. If there are not outputs specified, or if the output spec is None or an
empty list, the output spec is assumed to be ['*'].
3. If the input or output spec is not iterable, they are converted into
single element tuples. If they are any iterable, they are converted into
tuples.
4. The task['fn'] option must be callable.
5. If number of outputs is more than one, task['fn'] must be a generator
function.
6. Generator functions are not supported for output spec of '*'.
Returns new task with updated options
"""
task = original_task._asdict()
# Default values for inputs and outputs
if 'inputs' not in task or task['inputs'] is None:
task['inputs'] = ['*'] # depends on [control=['if'], data=[]]
# Outputs list cannot be empty
if 'outputs' not in task or task['outputs'] is None or len(task['outputs']) == 0:
task['outputs'] = ['*'] # depends on [control=['if'], data=[]]
# Convert to tuples (even for single values)
if not hasattr(task['inputs'], '__iter__') or isinstance(task['inputs'], str):
task['inputs'] = (task['inputs'],) # depends on [control=['if'], data=[]]
else:
task['inputs'] = tuple(task['inputs'])
if not hasattr(task['outputs'], '__iter__') or isinstance(task['outputs'], str):
task['outputs'] = (task['outputs'],) # depends on [control=['if'], data=[]]
else:
task['outputs'] = tuple(task['outputs'])
if not callable(task['fn']):
raise TypeError('Task function must be a callable object') # depends on [control=['if'], data=[]]
if len(task['outputs']) > 1 and (not inspect.isgeneratorfunction(task['fn'])):
raise TypeError('Multiple outputs are only supported with generator functions') # depends on [control=['if'], data=[]]
if inspect.isgeneratorfunction(task['fn']):
if task['outputs'][0] == '*':
raise TypeError('Generator functions cannot be used for tasks with output specification "*"') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return Task(**task) |
def _prepare_inputs(ma_fn, bam_file, out_dir):
"""
Convert to fastq with counts
"""
fixed_fa = os.path.join(out_dir, "file_reads.fa")
count_name =dict()
with file_transaction(fixed_fa) as out_tx:
with open(out_tx, 'w') as out_handle:
with open(ma_fn) as in_handle:
h = next(in_handle)
for line in in_handle:
cols = line.split("\t")
name_with_counts = "%s_x%s" % (cols[0], sum(map(int, cols[2:])))
count_name[cols[0]] = name_with_counts
out_handle.write(">%s\n%s\n" % (name_with_counts, cols[1]))
fixed_bam = os.path.join(out_dir, "align.bam")
bam_handle = pysam.AlignmentFile(bam_file, "rb")
with pysam.AlignmentFile(fixed_bam, "wb", template=bam_handle) as out_handle:
for read in bam_handle.fetch():
read.query_name = count_name[read.query_name]
out_handle.write(read)
return fixed_fa, fixed_bam | def function[_prepare_inputs, parameter[ma_fn, bam_file, out_dir]]:
constant[
Convert to fastq with counts
]
variable[fixed_fa] assign[=] call[name[os].path.join, parameter[name[out_dir], constant[file_reads.fa]]]
variable[count_name] assign[=] call[name[dict], parameter[]]
with call[name[file_transaction], parameter[name[fixed_fa]]] begin[:]
with call[name[open], parameter[name[out_tx], constant[w]]] begin[:]
with call[name[open], parameter[name[ma_fn]]] begin[:]
variable[h] assign[=] call[name[next], parameter[name[in_handle]]]
for taget[name[line]] in starred[name[in_handle]] begin[:]
variable[cols] assign[=] call[name[line].split, parameter[constant[ ]]]
variable[name_with_counts] assign[=] binary_operation[constant[%s_x%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b18332e0>, <ast.Call object at 0x7da1b1833250>]]]
call[name[count_name]][call[name[cols]][constant[0]]] assign[=] name[name_with_counts]
call[name[out_handle].write, parameter[binary_operation[constant[>%s
%s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1830eb0>, <ast.Subscript object at 0x7da1b1830ee0>]]]]]
variable[fixed_bam] assign[=] call[name[os].path.join, parameter[name[out_dir], constant[align.bam]]]
variable[bam_handle] assign[=] call[name[pysam].AlignmentFile, parameter[name[bam_file], constant[rb]]]
with call[name[pysam].AlignmentFile, parameter[name[fixed_bam], constant[wb]]] begin[:]
for taget[name[read]] in starred[call[name[bam_handle].fetch, parameter[]]] begin[:]
name[read].query_name assign[=] call[name[count_name]][name[read].query_name]
call[name[out_handle].write, parameter[name[read]]]
return[tuple[[<ast.Name object at 0x7da1b1830bb0>, <ast.Name object at 0x7da1b1830b80>]]] | keyword[def] identifier[_prepare_inputs] ( identifier[ma_fn] , identifier[bam_file] , identifier[out_dir] ):
literal[string]
identifier[fixed_fa] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] )
identifier[count_name] = identifier[dict] ()
keyword[with] identifier[file_transaction] ( identifier[fixed_fa] ) keyword[as] identifier[out_tx] :
keyword[with] identifier[open] ( identifier[out_tx] , literal[string] ) keyword[as] identifier[out_handle] :
keyword[with] identifier[open] ( identifier[ma_fn] ) keyword[as] identifier[in_handle] :
identifier[h] = identifier[next] ( identifier[in_handle] )
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
identifier[cols] = identifier[line] . identifier[split] ( literal[string] )
identifier[name_with_counts] = literal[string] %( identifier[cols] [ literal[int] ], identifier[sum] ( identifier[map] ( identifier[int] , identifier[cols] [ literal[int] :])))
identifier[count_name] [ identifier[cols] [ literal[int] ]]= identifier[name_with_counts]
identifier[out_handle] . identifier[write] ( literal[string] %( identifier[name_with_counts] , identifier[cols] [ literal[int] ]))
identifier[fixed_bam] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] )
identifier[bam_handle] = identifier[pysam] . identifier[AlignmentFile] ( identifier[bam_file] , literal[string] )
keyword[with] identifier[pysam] . identifier[AlignmentFile] ( identifier[fixed_bam] , literal[string] , identifier[template] = identifier[bam_handle] ) keyword[as] identifier[out_handle] :
keyword[for] identifier[read] keyword[in] identifier[bam_handle] . identifier[fetch] ():
identifier[read] . identifier[query_name] = identifier[count_name] [ identifier[read] . identifier[query_name] ]
identifier[out_handle] . identifier[write] ( identifier[read] )
keyword[return] identifier[fixed_fa] , identifier[fixed_bam] | def _prepare_inputs(ma_fn, bam_file, out_dir):
"""
Convert to fastq with counts
"""
fixed_fa = os.path.join(out_dir, 'file_reads.fa')
count_name = dict()
with file_transaction(fixed_fa) as out_tx:
with open(out_tx, 'w') as out_handle:
with open(ma_fn) as in_handle:
h = next(in_handle)
for line in in_handle:
cols = line.split('\t')
name_with_counts = '%s_x%s' % (cols[0], sum(map(int, cols[2:])))
count_name[cols[0]] = name_with_counts
out_handle.write('>%s\n%s\n' % (name_with_counts, cols[1])) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['in_handle']] # depends on [control=['with'], data=['open', 'out_handle']] # depends on [control=['with'], data=['out_tx']]
fixed_bam = os.path.join(out_dir, 'align.bam')
bam_handle = pysam.AlignmentFile(bam_file, 'rb')
with pysam.AlignmentFile(fixed_bam, 'wb', template=bam_handle) as out_handle:
for read in bam_handle.fetch():
read.query_name = count_name[read.query_name]
out_handle.write(read) # depends on [control=['for'], data=['read']] # depends on [control=['with'], data=['out_handle']]
return (fixed_fa, fixed_bam) |
def create_column(self, name, type):
"""Create a new column ``name`` of a specified type.
::
table.create_column('created_at', db.types.datetime)
`type` corresponds to an SQLAlchemy type as described by `dataset.db.Types`
"""
name = normalize_column_name(name)
if self.has_column(name):
log.debug("Column exists: %s" % name)
return
self._sync_table((Column(name, type),)) | def function[create_column, parameter[self, name, type]]:
constant[Create a new column ``name`` of a specified type.
::
table.create_column('created_at', db.types.datetime)
`type` corresponds to an SQLAlchemy type as described by `dataset.db.Types`
]
variable[name] assign[=] call[name[normalize_column_name], parameter[name[name]]]
if call[name[self].has_column, parameter[name[name]]] begin[:]
call[name[log].debug, parameter[binary_operation[constant[Column exists: %s] <ast.Mod object at 0x7da2590d6920> name[name]]]]
return[None]
call[name[self]._sync_table, parameter[tuple[[<ast.Call object at 0x7da20e955b10>]]]] | keyword[def] identifier[create_column] ( identifier[self] , identifier[name] , identifier[type] ):
literal[string]
identifier[name] = identifier[normalize_column_name] ( identifier[name] )
keyword[if] identifier[self] . identifier[has_column] ( identifier[name] ):
identifier[log] . identifier[debug] ( literal[string] % identifier[name] )
keyword[return]
identifier[self] . identifier[_sync_table] (( identifier[Column] ( identifier[name] , identifier[type] ),)) | def create_column(self, name, type):
"""Create a new column ``name`` of a specified type.
::
table.create_column('created_at', db.types.datetime)
`type` corresponds to an SQLAlchemy type as described by `dataset.db.Types`
"""
name = normalize_column_name(name)
if self.has_column(name):
log.debug('Column exists: %s' % name)
return # depends on [control=['if'], data=[]]
self._sync_table((Column(name, type),)) |
def parse_data_port_mappings(mappings, default_bridge='br-data'):
"""Parse data port mappings.
Mappings must be a space-delimited list of bridge:port.
Returns dict of the form {port:bridge} where ports may be mac addresses or
interface names.
"""
# NOTE(dosaboy): we use rvalue for key to allow multiple values to be
# proposed for <port> since it may be a mac address which will differ
# across units this allowing first-known-good to be chosen.
_mappings = parse_mappings(mappings, key_rvalue=True)
if not _mappings or list(_mappings.values()) == ['']:
if not mappings:
return {}
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {mappings.split()[0]: default_bridge}
ports = _mappings.keys()
if len(set(ports)) != len(ports):
raise Exception("It is not allowed to have the same port configured "
"on more than one bridge")
return _mappings | def function[parse_data_port_mappings, parameter[mappings, default_bridge]]:
constant[Parse data port mappings.
Mappings must be a space-delimited list of bridge:port.
Returns dict of the form {port:bridge} where ports may be mac addresses or
interface names.
]
variable[_mappings] assign[=] call[name[parse_mappings], parameter[name[mappings]]]
if <ast.BoolOp object at 0x7da1b124a2c0> begin[:]
if <ast.UnaryOp object at 0x7da1b124aaa0> begin[:]
return[dictionary[[], []]]
variable[_mappings] assign[=] dictionary[[<ast.Subscript object at 0x7da1b124b400>], [<ast.Name object at 0x7da1b1249d20>]]
variable[ports] assign[=] call[name[_mappings].keys, parameter[]]
if compare[call[name[len], parameter[call[name[set], parameter[name[ports]]]]] not_equal[!=] call[name[len], parameter[name[ports]]]] begin[:]
<ast.Raise object at 0x7da1b1248790>
return[name[_mappings]] | keyword[def] identifier[parse_data_port_mappings] ( identifier[mappings] , identifier[default_bridge] = literal[string] ):
literal[string]
identifier[_mappings] = identifier[parse_mappings] ( identifier[mappings] , identifier[key_rvalue] = keyword[True] )
keyword[if] keyword[not] identifier[_mappings] keyword[or] identifier[list] ( identifier[_mappings] . identifier[values] ())==[ literal[string] ]:
keyword[if] keyword[not] identifier[mappings] :
keyword[return] {}
identifier[_mappings] ={ identifier[mappings] . identifier[split] ()[ literal[int] ]: identifier[default_bridge] }
identifier[ports] = identifier[_mappings] . identifier[keys] ()
keyword[if] identifier[len] ( identifier[set] ( identifier[ports] ))!= identifier[len] ( identifier[ports] ):
keyword[raise] identifier[Exception] ( literal[string]
literal[string] )
keyword[return] identifier[_mappings] | def parse_data_port_mappings(mappings, default_bridge='br-data'):
"""Parse data port mappings.
Mappings must be a space-delimited list of bridge:port.
Returns dict of the form {port:bridge} where ports may be mac addresses or
interface names.
"""
# NOTE(dosaboy): we use rvalue for key to allow multiple values to be
# proposed for <port> since it may be a mac address which will differ
# across units this allowing first-known-good to be chosen.
_mappings = parse_mappings(mappings, key_rvalue=True)
if not _mappings or list(_mappings.values()) == ['']:
if not mappings:
return {} # depends on [control=['if'], data=[]]
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {mappings.split()[0]: default_bridge} # depends on [control=['if'], data=[]]
ports = _mappings.keys()
if len(set(ports)) != len(ports):
raise Exception('It is not allowed to have the same port configured on more than one bridge') # depends on [control=['if'], data=[]]
return _mappings |
def _haab_count(day, month):
'''Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365'''
if day < 0 or day > 19:
raise IndexError("Invalid day number")
try:
i = HAAB_MONTHS.index(month)
except ValueError:
raise ValueError("'{0}' is not a valid Haab' month".format(month))
return min(i * 20, 360) + day | def function[_haab_count, parameter[day, month]]:
constant[Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365]
if <ast.BoolOp object at 0x7da1b0e2d360> begin[:]
<ast.Raise object at 0x7da1b0e2c430>
<ast.Try object at 0x7da1b0e2c550>
return[binary_operation[call[name[min], parameter[binary_operation[name[i] * constant[20]], constant[360]]] + name[day]]] | keyword[def] identifier[_haab_count] ( identifier[day] , identifier[month] ):
literal[string]
keyword[if] identifier[day] < literal[int] keyword[or] identifier[day] > literal[int] :
keyword[raise] identifier[IndexError] ( literal[string] )
keyword[try] :
identifier[i] = identifier[HAAB_MONTHS] . identifier[index] ( identifier[month] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[month] ))
keyword[return] identifier[min] ( identifier[i] * literal[int] , literal[int] )+ identifier[day] | def _haab_count(day, month):
"""Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365"""
if day < 0 or day > 19:
raise IndexError('Invalid day number') # depends on [control=['if'], data=[]]
try:
i = HAAB_MONTHS.index(month) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError("'{0}' is not a valid Haab' month".format(month)) # depends on [control=['except'], data=[]]
return min(i * 20, 360) + day |
def getBucketIndices(self, input, learn=None):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.getBucketIndices]
"""
self.recordNum +=1
if learn is None:
learn = self._learningEnabled
if type(input) is float and math.isnan(input):
input = SENTINEL_VALUE_FOR_MISSING_DATA
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return [None]
else:
self._setMinAndMax(input, learn)
return super(AdaptiveScalarEncoder, self).getBucketIndices(input) | def function[getBucketIndices, parameter[self, input, learn]]:
constant[
[overrides nupic.encoders.scalar.ScalarEncoder.getBucketIndices]
]
<ast.AugAssign object at 0x7da20c7cb0d0>
if compare[name[learn] is constant[None]] begin[:]
variable[learn] assign[=] name[self]._learningEnabled
if <ast.BoolOp object at 0x7da20c7c8070> begin[:]
variable[input] assign[=] name[SENTINEL_VALUE_FOR_MISSING_DATA]
if compare[name[input] equal[==] name[SENTINEL_VALUE_FOR_MISSING_DATA]] begin[:]
return[list[[<ast.Constant object at 0x7da20c7cb4c0>]]] | keyword[def] identifier[getBucketIndices] ( identifier[self] , identifier[input] , identifier[learn] = keyword[None] ):
literal[string]
identifier[self] . identifier[recordNum] += literal[int]
keyword[if] identifier[learn] keyword[is] keyword[None] :
identifier[learn] = identifier[self] . identifier[_learningEnabled]
keyword[if] identifier[type] ( identifier[input] ) keyword[is] identifier[float] keyword[and] identifier[math] . identifier[isnan] ( identifier[input] ):
identifier[input] = identifier[SENTINEL_VALUE_FOR_MISSING_DATA]
keyword[if] identifier[input] == identifier[SENTINEL_VALUE_FOR_MISSING_DATA] :
keyword[return] [ keyword[None] ]
keyword[else] :
identifier[self] . identifier[_setMinAndMax] ( identifier[input] , identifier[learn] )
keyword[return] identifier[super] ( identifier[AdaptiveScalarEncoder] , identifier[self] ). identifier[getBucketIndices] ( identifier[input] ) | def getBucketIndices(self, input, learn=None):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.getBucketIndices]
"""
self.recordNum += 1
if learn is None:
learn = self._learningEnabled # depends on [control=['if'], data=['learn']]
if type(input) is float and math.isnan(input):
input = SENTINEL_VALUE_FOR_MISSING_DATA # depends on [control=['if'], data=[]]
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return [None] # depends on [control=['if'], data=[]]
else:
self._setMinAndMax(input, learn)
return super(AdaptiveScalarEncoder, self).getBucketIndices(input) |
def _getshapes_2d(center, max_radius, shape):
"""Calculate indices and slices for the bounding box of a disk."""
index_mean = shape * center
index_radius = max_radius / 2.0 * np.array(shape)
# Avoid negative indices
min_idx = np.maximum(np.floor(index_mean - index_radius), 0).astype(int)
max_idx = np.ceil(index_mean + index_radius).astype(int)
idx = [slice(minx, maxx) for minx, maxx in zip(min_idx, max_idx)]
shapes = [(idx[0], slice(None)),
(slice(None), idx[1])]
return tuple(idx), tuple(shapes) | def function[_getshapes_2d, parameter[center, max_radius, shape]]:
constant[Calculate indices and slices for the bounding box of a disk.]
variable[index_mean] assign[=] binary_operation[name[shape] * name[center]]
variable[index_radius] assign[=] binary_operation[binary_operation[name[max_radius] / constant[2.0]] * call[name[np].array, parameter[name[shape]]]]
variable[min_idx] assign[=] call[call[name[np].maximum, parameter[call[name[np].floor, parameter[binary_operation[name[index_mean] - name[index_radius]]]], constant[0]]].astype, parameter[name[int]]]
variable[max_idx] assign[=] call[call[name[np].ceil, parameter[binary_operation[name[index_mean] + name[index_radius]]]].astype, parameter[name[int]]]
variable[idx] assign[=] <ast.ListComp object at 0x7da1b1ec68f0>
variable[shapes] assign[=] list[[<ast.Tuple object at 0x7da1b1ec47c0>, <ast.Tuple object at 0x7da1b1ec6ad0>]]
return[tuple[[<ast.Call object at 0x7da1b1ec7940>, <ast.Call object at 0x7da1b1ec4820>]]] | keyword[def] identifier[_getshapes_2d] ( identifier[center] , identifier[max_radius] , identifier[shape] ):
literal[string]
identifier[index_mean] = identifier[shape] * identifier[center]
identifier[index_radius] = identifier[max_radius] / literal[int] * identifier[np] . identifier[array] ( identifier[shape] )
identifier[min_idx] = identifier[np] . identifier[maximum] ( identifier[np] . identifier[floor] ( identifier[index_mean] - identifier[index_radius] ), literal[int] ). identifier[astype] ( identifier[int] )
identifier[max_idx] = identifier[np] . identifier[ceil] ( identifier[index_mean] + identifier[index_radius] ). identifier[astype] ( identifier[int] )
identifier[idx] =[ identifier[slice] ( identifier[minx] , identifier[maxx] ) keyword[for] identifier[minx] , identifier[maxx] keyword[in] identifier[zip] ( identifier[min_idx] , identifier[max_idx] )]
identifier[shapes] =[( identifier[idx] [ literal[int] ], identifier[slice] ( keyword[None] )),
( identifier[slice] ( keyword[None] ), identifier[idx] [ literal[int] ])]
keyword[return] identifier[tuple] ( identifier[idx] ), identifier[tuple] ( identifier[shapes] ) | def _getshapes_2d(center, max_radius, shape):
"""Calculate indices and slices for the bounding box of a disk."""
index_mean = shape * center
index_radius = max_radius / 2.0 * np.array(shape)
# Avoid negative indices
min_idx = np.maximum(np.floor(index_mean - index_radius), 0).astype(int)
max_idx = np.ceil(index_mean + index_radius).astype(int)
idx = [slice(minx, maxx) for (minx, maxx) in zip(min_idx, max_idx)]
shapes = [(idx[0], slice(None)), (slice(None), idx[1])]
return (tuple(idx), tuple(shapes)) |
def set_seeds(self, seeds):
"""
Function for manual seed setting. Sets variable seeds and prepares
voxels for density model.
:param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
3 - object just hard constraints, no model training, 4 - background
just hard constraints, no model training)
"""
if self.img.shape != seeds.shape:
raise Exception("Seeds must be same size as input image")
self.seeds = seeds.astype("int8")
self.voxels1 = self.img[self.seeds == 1]
self.voxels2 = self.img[self.seeds == 2] | def function[set_seeds, parameter[self, seeds]]:
constant[
Function for manual seed setting. Sets variable seeds and prepares
voxels for density model.
:param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
3 - object just hard constraints, no model training, 4 - background
just hard constraints, no model training)
]
if compare[name[self].img.shape not_equal[!=] name[seeds].shape] begin[:]
<ast.Raise object at 0x7da1b1083e20>
name[self].seeds assign[=] call[name[seeds].astype, parameter[constant[int8]]]
name[self].voxels1 assign[=] call[name[self].img][compare[name[self].seeds equal[==] constant[1]]]
name[self].voxels2 assign[=] call[name[self].img][compare[name[self].seeds equal[==] constant[2]]] | keyword[def] identifier[set_seeds] ( identifier[self] , identifier[seeds] ):
literal[string]
keyword[if] identifier[self] . identifier[img] . identifier[shape] != identifier[seeds] . identifier[shape] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[seeds] = identifier[seeds] . identifier[astype] ( literal[string] )
identifier[self] . identifier[voxels1] = identifier[self] . identifier[img] [ identifier[self] . identifier[seeds] == literal[int] ]
identifier[self] . identifier[voxels2] = identifier[self] . identifier[img] [ identifier[self] . identifier[seeds] == literal[int] ] | def set_seeds(self, seeds):
"""
Function for manual seed setting. Sets variable seeds and prepares
voxels for density model.
:param seeds: ndarray (0 - nothing, 1 - object, 2 - background,
3 - object just hard constraints, no model training, 4 - background
just hard constraints, no model training)
"""
if self.img.shape != seeds.shape:
raise Exception('Seeds must be same size as input image') # depends on [control=['if'], data=[]]
self.seeds = seeds.astype('int8')
self.voxels1 = self.img[self.seeds == 1]
self.voxels2 = self.img[self.seeds == 2] |
def parse(self, light=False):
""" Parses data from Wikipedia page markup.
The markup comes from Wikipedia's edit page.
We parse it here into objects containing plain text.
The light version parses only links to other articles, it's faster than a full parse.
"""
markup = self.markup
self.disambiguation = self.parse_disambiguation(markup)
self.categories = self.parse_categories(markup)
self.links = self.parse_links(markup)
if not light:
# Conversion of HTML markup to Wikipedia markup.
markup = self.convert_pre(markup)
markup = self.convert_li(markup)
markup = self.convert_table(markup)
markup = replace_entities(markup)
# Harvest references from the markup
# and replace them by footnotes.
markup = markup.replace("{{Cite", "{{cite")
markup = re.sub("\{\{ {1,2}cite", "{{cite", markup)
self.references, markup = self.parse_references(markup)
# Make sure there are no legend linebreaks in image links.
# Then harvest images and strip them from the markup.
markup = re.sub("\n+(\{\{legend)", "\\1", markup)
self.images, markup = self.parse_images(markup)
self.images.extend(self.parse_gallery_images(markup))
self.paragraphs = self.parse_paragraphs(markup)
self.tables = self.parse_tables(markup)
self.translations = self.parse_translations(markup)
self.important = self.parse_important(markup) | def function[parse, parameter[self, light]]:
constant[ Parses data from Wikipedia page markup.
The markup comes from Wikipedia's edit page.
We parse it here into objects containing plain text.
The light version parses only links to other articles, it's faster than a full parse.
]
variable[markup] assign[=] name[self].markup
name[self].disambiguation assign[=] call[name[self].parse_disambiguation, parameter[name[markup]]]
name[self].categories assign[=] call[name[self].parse_categories, parameter[name[markup]]]
name[self].links assign[=] call[name[self].parse_links, parameter[name[markup]]]
if <ast.UnaryOp object at 0x7da2041dbd30> begin[:]
variable[markup] assign[=] call[name[self].convert_pre, parameter[name[markup]]]
variable[markup] assign[=] call[name[self].convert_li, parameter[name[markup]]]
variable[markup] assign[=] call[name[self].convert_table, parameter[name[markup]]]
variable[markup] assign[=] call[name[replace_entities], parameter[name[markup]]]
variable[markup] assign[=] call[name[markup].replace, parameter[constant[{{Cite], constant[{{cite]]]
variable[markup] assign[=] call[name[re].sub, parameter[constant[\{\{ {1,2}cite], constant[{{cite], name[markup]]]
<ast.Tuple object at 0x7da2041dacb0> assign[=] call[name[self].parse_references, parameter[name[markup]]]
variable[markup] assign[=] call[name[re].sub, parameter[constant[
+(\{\{legend)], constant[\1], name[markup]]]
<ast.Tuple object at 0x7da1aff569e0> assign[=] call[name[self].parse_images, parameter[name[markup]]]
call[name[self].images.extend, parameter[call[name[self].parse_gallery_images, parameter[name[markup]]]]]
name[self].paragraphs assign[=] call[name[self].parse_paragraphs, parameter[name[markup]]]
name[self].tables assign[=] call[name[self].parse_tables, parameter[name[markup]]]
name[self].translations assign[=] call[name[self].parse_translations, parameter[name[markup]]]
name[self].important assign[=] call[name[self].parse_important, parameter[name[markup]]] | keyword[def] identifier[parse] ( identifier[self] , identifier[light] = keyword[False] ):
literal[string]
identifier[markup] = identifier[self] . identifier[markup]
identifier[self] . identifier[disambiguation] = identifier[self] . identifier[parse_disambiguation] ( identifier[markup] )
identifier[self] . identifier[categories] = identifier[self] . identifier[parse_categories] ( identifier[markup] )
identifier[self] . identifier[links] = identifier[self] . identifier[parse_links] ( identifier[markup] )
keyword[if] keyword[not] identifier[light] :
identifier[markup] = identifier[self] . identifier[convert_pre] ( identifier[markup] )
identifier[markup] = identifier[self] . identifier[convert_li] ( identifier[markup] )
identifier[markup] = identifier[self] . identifier[convert_table] ( identifier[markup] )
identifier[markup] = identifier[replace_entities] ( identifier[markup] )
identifier[markup] = identifier[markup] . identifier[replace] ( literal[string] , literal[string] )
identifier[markup] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[markup] )
identifier[self] . identifier[references] , identifier[markup] = identifier[self] . identifier[parse_references] ( identifier[markup] )
identifier[markup] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[markup] )
identifier[self] . identifier[images] , identifier[markup] = identifier[self] . identifier[parse_images] ( identifier[markup] )
identifier[self] . identifier[images] . identifier[extend] ( identifier[self] . identifier[parse_gallery_images] ( identifier[markup] ))
identifier[self] . identifier[paragraphs] = identifier[self] . identifier[parse_paragraphs] ( identifier[markup] )
identifier[self] . identifier[tables] = identifier[self] . identifier[parse_tables] ( identifier[markup] )
identifier[self] . identifier[translations] = identifier[self] . identifier[parse_translations] ( identifier[markup] )
identifier[self] . identifier[important] = identifier[self] . identifier[parse_important] ( identifier[markup] ) | def parse(self, light=False):
""" Parses data from Wikipedia page markup.
The markup comes from Wikipedia's edit page.
We parse it here into objects containing plain text.
The light version parses only links to other articles, it's faster than a full parse.
"""
markup = self.markup
self.disambiguation = self.parse_disambiguation(markup)
self.categories = self.parse_categories(markup)
self.links = self.parse_links(markup)
if not light:
# Conversion of HTML markup to Wikipedia markup.
markup = self.convert_pre(markup)
markup = self.convert_li(markup)
markup = self.convert_table(markup)
markup = replace_entities(markup)
# Harvest references from the markup
# and replace them by footnotes.
markup = markup.replace('{{Cite', '{{cite')
markup = re.sub('\\{\\{ {1,2}cite', '{{cite', markup)
(self.references, markup) = self.parse_references(markup)
# Make sure there are no legend linebreaks in image links.
# Then harvest images and strip them from the markup.
markup = re.sub('\n+(\\{\\{legend)', '\\1', markup)
(self.images, markup) = self.parse_images(markup)
self.images.extend(self.parse_gallery_images(markup))
self.paragraphs = self.parse_paragraphs(markup)
self.tables = self.parse_tables(markup)
self.translations = self.parse_translations(markup)
self.important = self.parse_important(markup) # depends on [control=['if'], data=[]] |
def roles(self):
"""gets user groups"""
result = AuthGroup.objects(creator=self.client).only('role')
return json.loads(result.to_json()) | def function[roles, parameter[self]]:
constant[gets user groups]
variable[result] assign[=] call[call[name[AuthGroup].objects, parameter[]].only, parameter[constant[role]]]
return[call[name[json].loads, parameter[call[name[result].to_json, parameter[]]]]] | keyword[def] identifier[roles] ( identifier[self] ):
literal[string]
identifier[result] = identifier[AuthGroup] . identifier[objects] ( identifier[creator] = identifier[self] . identifier[client] ). identifier[only] ( literal[string] )
keyword[return] identifier[json] . identifier[loads] ( identifier[result] . identifier[to_json] ()) | def roles(self):
"""gets user groups"""
result = AuthGroup.objects(creator=self.client).only('role')
return json.loads(result.to_json()) |
def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age
result = self._request('GET', '/aggregates/{}'.format(check),
data=json.dumps(data))
return result.json() | def function[get_aggregate_check, parameter[self, check, age]]:
constant[
Returns the list of aggregates for a given check
]
variable[data] assign[=] dictionary[[], []]
if name[age] begin[:]
call[name[data]][constant[max_age]] assign[=] name[age]
variable[result] assign[=] call[name[self]._request, parameter[constant[GET], call[constant[/aggregates/{}].format, parameter[name[check]]]]]
return[call[name[result].json, parameter[]]] | keyword[def] identifier[get_aggregate_check] ( identifier[self] , identifier[check] , identifier[age] = keyword[None] ):
literal[string]
identifier[data] ={}
keyword[if] identifier[age] :
identifier[data] [ literal[string] ]= identifier[age]
identifier[result] = identifier[self] . identifier[_request] ( literal[string] , literal[string] . identifier[format] ( identifier[check] ),
identifier[data] = identifier[json] . identifier[dumps] ( identifier[data] ))
keyword[return] identifier[result] . identifier[json] () | def get_aggregate_check(self, check, age=None):
"""
Returns the list of aggregates for a given check
"""
data = {}
if age:
data['max_age'] = age # depends on [control=['if'], data=[]]
result = self._request('GET', '/aggregates/{}'.format(check), data=json.dumps(data))
return result.json() |
def _format_command_usage(commands):
"""
Construct the Commands-part of the usage text.
Parameters
----------
commands : dict[str, func]
dictionary of supported commands.
Each entry should be a tuple of (name, function).
Returns
-------
str
Text formatted as a description of the commands.
"""
if not commands:
return ""
command_usage = "\nCommands:\n"
cmd_len = max([len(c) for c in commands] + [8])
command_doc = OrderedDict(
[(cmd_name, _get_first_line_of_docstring(cmd_doc))
for cmd_name, cmd_doc in commands.items()])
for cmd_name, cmd_doc in command_doc.items():
command_usage += (" {:%d} {}\n" % cmd_len).format(cmd_name, cmd_doc)
return command_usage | def function[_format_command_usage, parameter[commands]]:
constant[
Construct the Commands-part of the usage text.
Parameters
----------
commands : dict[str, func]
dictionary of supported commands.
Each entry should be a tuple of (name, function).
Returns
-------
str
Text formatted as a description of the commands.
]
if <ast.UnaryOp object at 0x7da1b18fa7a0> begin[:]
return[constant[]]
variable[command_usage] assign[=] constant[
Commands:
]
variable[cmd_len] assign[=] call[name[max], parameter[binary_operation[<ast.ListComp object at 0x7da1b18fbf70> + list[[<ast.Constant object at 0x7da1b18fb550>]]]]]
variable[command_doc] assign[=] call[name[OrderedDict], parameter[<ast.ListComp object at 0x7da1b18fb9a0>]]
for taget[tuple[[<ast.Name object at 0x7da1b18fb9d0>, <ast.Name object at 0x7da1b18f9060>]]] in starred[call[name[command_doc].items, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b18f90f0>
return[name[command_usage]] | keyword[def] identifier[_format_command_usage] ( identifier[commands] ):
literal[string]
keyword[if] keyword[not] identifier[commands] :
keyword[return] literal[string]
identifier[command_usage] = literal[string]
identifier[cmd_len] = identifier[max] ([ identifier[len] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[commands] ]+[ literal[int] ])
identifier[command_doc] = identifier[OrderedDict] (
[( identifier[cmd_name] , identifier[_get_first_line_of_docstring] ( identifier[cmd_doc] ))
keyword[for] identifier[cmd_name] , identifier[cmd_doc] keyword[in] identifier[commands] . identifier[items] ()])
keyword[for] identifier[cmd_name] , identifier[cmd_doc] keyword[in] identifier[command_doc] . identifier[items] ():
identifier[command_usage] +=( literal[string] % identifier[cmd_len] ). identifier[format] ( identifier[cmd_name] , identifier[cmd_doc] )
keyword[return] identifier[command_usage] | def _format_command_usage(commands):
"""
Construct the Commands-part of the usage text.
Parameters
----------
commands : dict[str, func]
dictionary of supported commands.
Each entry should be a tuple of (name, function).
Returns
-------
str
Text formatted as a description of the commands.
"""
if not commands:
return '' # depends on [control=['if'], data=[]]
command_usage = '\nCommands:\n'
cmd_len = max([len(c) for c in commands] + [8])
command_doc = OrderedDict([(cmd_name, _get_first_line_of_docstring(cmd_doc)) for (cmd_name, cmd_doc) in commands.items()])
for (cmd_name, cmd_doc) in command_doc.items():
command_usage += (' {:%d} {}\n' % cmd_len).format(cmd_name, cmd_doc) # depends on [control=['for'], data=[]]
return command_usage |
def to_text_format(self):
'''Format as detached DNS information as text.'''
return '\n'.join(itertools.chain(
(self.fetch_date.strftime('%Y%m%d%H%M%S'), ),
(rr.to_text() for rr in self.resource_records),
(),
)) | def function[to_text_format, parameter[self]]:
constant[Format as detached DNS information as text.]
return[call[constant[
].join, parameter[call[name[itertools].chain, parameter[tuple[[<ast.Call object at 0x7da2054a67d0>]], <ast.GeneratorExp object at 0x7da2054a6020>, tuple[[]]]]]]] | keyword[def] identifier[to_text_format] ( identifier[self] ):
literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[itertools] . identifier[chain] (
( identifier[self] . identifier[fetch_date] . identifier[strftime] ( literal[string] ),),
( identifier[rr] . identifier[to_text] () keyword[for] identifier[rr] keyword[in] identifier[self] . identifier[resource_records] ),
(),
)) | def to_text_format(self):
"""Format as detached DNS information as text."""
return '\n'.join(itertools.chain((self.fetch_date.strftime('%Y%m%d%H%M%S'),), (rr.to_text() for rr in self.resource_records), ())) |
def add_prefix(prefix, s):
"Add `prefix` to all unprefixed names in `s`"
# tokenize the XPath expression
toks = xpath_lexer.scan(s)
# add default prefix to unprefixed names
toks2 = [_add_prefix(prefix, tok) for tok in toks]
# build a string of the patched expression
ls = [x.value for x in toks2]
return ''.join(ls) | def function[add_prefix, parameter[prefix, s]]:
constant[Add `prefix` to all unprefixed names in `s`]
variable[toks] assign[=] call[name[xpath_lexer].scan, parameter[name[s]]]
variable[toks2] assign[=] <ast.ListComp object at 0x7da20cabd270>
variable[ls] assign[=] <ast.ListComp object at 0x7da20c992680>
return[call[constant[].join, parameter[name[ls]]]] | keyword[def] identifier[add_prefix] ( identifier[prefix] , identifier[s] ):
literal[string]
identifier[toks] = identifier[xpath_lexer] . identifier[scan] ( identifier[s] )
identifier[toks2] =[ identifier[_add_prefix] ( identifier[prefix] , identifier[tok] ) keyword[for] identifier[tok] keyword[in] identifier[toks] ]
identifier[ls] =[ identifier[x] . identifier[value] keyword[for] identifier[x] keyword[in] identifier[toks2] ]
keyword[return] literal[string] . identifier[join] ( identifier[ls] ) | def add_prefix(prefix, s):
"""Add `prefix` to all unprefixed names in `s`"""
# tokenize the XPath expression
toks = xpath_lexer.scan(s)
# add default prefix to unprefixed names
toks2 = [_add_prefix(prefix, tok) for tok in toks]
# build a string of the patched expression
ls = [x.value for x in toks2]
return ''.join(ls) |
def write_stats(datadfs, outputfile, names=[]):
"""Call calculation functions and write stats file.
This function takes a list of DataFrames,
and will create a column for each in the tab separated output.
"""
if outputfile == 'stdout':
output = sys.stdout
else:
output = open(outputfile, 'wt')
stats = [Stats(df) for df in datadfs]
features = {
"Number of reads": "number_of_reads",
"Total bases": "number_of_bases",
"Total bases aligned": "number_of_bases_aligned",
"Median read length": "median_read_length",
"Mean read length": "mean_read_length",
"Read length N50": "n50",
"Average percent identity": "average_identity",
"Median percent identity": "median_identity",
"Active channels": "active_channels",
"Mean read quality": "mean_qual",
"Median read quality": "median_qual",
}
max_len = max([len(k) for k in features.keys()])
try:
max_num = max(max([len(str(s.number_of_bases)) for s in stats]),
max([len(str(n)) for n in names])) + 6
except ValueError:
max_num = max([len(str(s.number_of_bases)) for s in stats]) + 6
output.write("{:<{}}{}\n".format('General summary:', max_len,
" ".join(['{:>{}}'.format(n, max_num) for n in names])))
for f in sorted(features.keys()):
try:
output.write("{f:{pad}}{v}\n".format(
f=f + ':',
pad=max_len,
v=feature_list(stats, features[f], padding=max_num)))
except KeyError:
pass
if all(["quals" in df for df in datadfs]):
long_features = {
"Top 5 longest reads and their mean basecall quality score":
["top5_lengths", range(1, 6)],
"Top 5 highest mean basecall quality scores and their read lengths":
["top5_quals", range(1, 6)],
"Number, percentage and megabases of reads above quality cutoffs":
["reads_above_qual", [">Q" + str(q) for q in stats[0].qualgroups]],
}
for lf in sorted(long_features.keys()):
output.write(lf + "\n")
for i in range(5):
output.write("{}:\t{}\n".format(
long_features[lf][1][i], feature_list(stats, long_features[lf][0], index=i))) | def function[write_stats, parameter[datadfs, outputfile, names]]:
constant[Call calculation functions and write stats file.
This function takes a list of DataFrames,
and will create a column for each in the tab separated output.
]
if compare[name[outputfile] equal[==] constant[stdout]] begin[:]
variable[output] assign[=] name[sys].stdout
variable[stats] assign[=] <ast.ListComp object at 0x7da18f00c4f0>
variable[features] assign[=] dictionary[[<ast.Constant object at 0x7da18f00c490>, <ast.Constant object at 0x7da18f00d120>, <ast.Constant object at 0x7da18f00e890>, <ast.Constant object at 0x7da18f00cee0>, <ast.Constant object at 0x7da18f00e320>, <ast.Constant object at 0x7da18f00f0d0>, <ast.Constant object at 0x7da18f00e4a0>, <ast.Constant object at 0x7da18f00f520>, <ast.Constant object at 0x7da18f00c370>, <ast.Constant object at 0x7da18f00f4c0>, <ast.Constant object at 0x7da18f00c7c0>], [<ast.Constant object at 0x7da18f00cf40>, <ast.Constant object at 0x7da18f00d4b0>, <ast.Constant object at 0x7da18f00f430>, <ast.Constant object at 0x7da18f00fe50>, <ast.Constant object at 0x7da18f00de70>, <ast.Constant object at 0x7da18f00c3d0>, <ast.Constant object at 0x7da18f00f7f0>, <ast.Constant object at 0x7da18f00f2e0>, <ast.Constant object at 0x7da18f00de10>, <ast.Constant object at 0x7da18f00d4e0>, <ast.Constant object at 0x7da18f00d690>]]
variable[max_len] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da2054a4dc0>]]
<ast.Try object at 0x7da2054a5660>
call[name[output].write, parameter[call[constant[{:<{}}{}
].format, parameter[constant[General summary:], name[max_len], call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2054a6680>]]]]]]
for taget[name[f]] in starred[call[name[sorted], parameter[call[name[features].keys, parameter[]]]]] begin[:]
<ast.Try object at 0x7da2054a61a0>
if call[name[all], parameter[<ast.ListComp object at 0x7da2054a4640>]] begin[:]
variable[long_features] assign[=] dictionary[[<ast.Constant object at 0x7da2054a7c70>, <ast.Constant object at 0x7da2054a72e0>, <ast.Constant object at 0x7da2054a5ea0>], [<ast.List object at 0x7da2054a7df0>, <ast.List object at 0x7da2054a70d0>, <ast.List object at 0x7da2054a4f70>]]
for taget[name[lf]] in starred[call[name[sorted], parameter[call[name[long_features].keys, parameter[]]]]] begin[:]
call[name[output].write, parameter[binary_operation[name[lf] + constant[
]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[5]]]] begin[:]
call[name[output].write, parameter[call[constant[{}: {}
].format, parameter[call[call[call[name[long_features]][name[lf]]][constant[1]]][name[i]], call[name[feature_list], parameter[name[stats], call[call[name[long_features]][name[lf]]][constant[0]]]]]]]] | keyword[def] identifier[write_stats] ( identifier[datadfs] , identifier[outputfile] , identifier[names] =[]):
literal[string]
keyword[if] identifier[outputfile] == literal[string] :
identifier[output] = identifier[sys] . identifier[stdout]
keyword[else] :
identifier[output] = identifier[open] ( identifier[outputfile] , literal[string] )
identifier[stats] =[ identifier[Stats] ( identifier[df] ) keyword[for] identifier[df] keyword[in] identifier[datadfs] ]
identifier[features] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[max_len] = identifier[max] ([ identifier[len] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[features] . identifier[keys] ()])
keyword[try] :
identifier[max_num] = identifier[max] ( identifier[max] ([ identifier[len] ( identifier[str] ( identifier[s] . identifier[number_of_bases] )) keyword[for] identifier[s] keyword[in] identifier[stats] ]),
identifier[max] ([ identifier[len] ( identifier[str] ( identifier[n] )) keyword[for] identifier[n] keyword[in] identifier[names] ]))+ literal[int]
keyword[except] identifier[ValueError] :
identifier[max_num] = identifier[max] ([ identifier[len] ( identifier[str] ( identifier[s] . identifier[number_of_bases] )) keyword[for] identifier[s] keyword[in] identifier[stats] ])+ literal[int]
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( literal[string] , identifier[max_len] ,
literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[n] , identifier[max_num] ) keyword[for] identifier[n] keyword[in] identifier[names] ])))
keyword[for] identifier[f] keyword[in] identifier[sorted] ( identifier[features] . identifier[keys] ()):
keyword[try] :
identifier[output] . identifier[write] ( literal[string] . identifier[format] (
identifier[f] = identifier[f] + literal[string] ,
identifier[pad] = identifier[max_len] ,
identifier[v] = identifier[feature_list] ( identifier[stats] , identifier[features] [ identifier[f] ], identifier[padding] = identifier[max_num] )))
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[if] identifier[all] ([ literal[string] keyword[in] identifier[df] keyword[for] identifier[df] keyword[in] identifier[datadfs] ]):
identifier[long_features] ={
literal[string] :
[ literal[string] , identifier[range] ( literal[int] , literal[int] )],
literal[string] :
[ literal[string] , identifier[range] ( literal[int] , literal[int] )],
literal[string] :
[ literal[string] ,[ literal[string] + identifier[str] ( identifier[q] ) keyword[for] identifier[q] keyword[in] identifier[stats] [ literal[int] ]. identifier[qualgroups] ]],
}
keyword[for] identifier[lf] keyword[in] identifier[sorted] ( identifier[long_features] . identifier[keys] ()):
identifier[output] . identifier[write] ( identifier[lf] + literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[output] . identifier[write] ( literal[string] . identifier[format] (
identifier[long_features] [ identifier[lf] ][ literal[int] ][ identifier[i] ], identifier[feature_list] ( identifier[stats] , identifier[long_features] [ identifier[lf] ][ literal[int] ], identifier[index] = identifier[i] ))) | def write_stats(datadfs, outputfile, names=[]):
"""Call calculation functions and write stats file.
This function takes a list of DataFrames,
and will create a column for each in the tab separated output.
"""
if outputfile == 'stdout':
output = sys.stdout # depends on [control=['if'], data=[]]
else:
output = open(outputfile, 'wt')
stats = [Stats(df) for df in datadfs]
features = {'Number of reads': 'number_of_reads', 'Total bases': 'number_of_bases', 'Total bases aligned': 'number_of_bases_aligned', 'Median read length': 'median_read_length', 'Mean read length': 'mean_read_length', 'Read length N50': 'n50', 'Average percent identity': 'average_identity', 'Median percent identity': 'median_identity', 'Active channels': 'active_channels', 'Mean read quality': 'mean_qual', 'Median read quality': 'median_qual'}
max_len = max([len(k) for k in features.keys()])
try:
max_num = max(max([len(str(s.number_of_bases)) for s in stats]), max([len(str(n)) for n in names])) + 6 # depends on [control=['try'], data=[]]
except ValueError:
max_num = max([len(str(s.number_of_bases)) for s in stats]) + 6 # depends on [control=['except'], data=[]]
output.write('{:<{}}{}\n'.format('General summary:', max_len, ' '.join(['{:>{}}'.format(n, max_num) for n in names])))
for f in sorted(features.keys()):
try:
output.write('{f:{pad}}{v}\n'.format(f=f + ':', pad=max_len, v=feature_list(stats, features[f], padding=max_num))) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['f']]
if all(['quals' in df for df in datadfs]):
long_features = {'Top 5 longest reads and their mean basecall quality score': ['top5_lengths', range(1, 6)], 'Top 5 highest mean basecall quality scores and their read lengths': ['top5_quals', range(1, 6)], 'Number, percentage and megabases of reads above quality cutoffs': ['reads_above_qual', ['>Q' + str(q) for q in stats[0].qualgroups]]}
for lf in sorted(long_features.keys()):
output.write(lf + '\n')
for i in range(5):
output.write('{}:\t{}\n'.format(long_features[lf][1][i], feature_list(stats, long_features[lf][0], index=i))) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['lf']] # depends on [control=['if'], data=[]] |
def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps))) | def function[frames_to_ms, parameter[frames, fps]]:
constant[
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
]
if compare[name[fps] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da207f98c10>
return[call[name[int], parameter[call[name[round], parameter[binary_operation[name[frames] * binary_operation[constant[1000] / name[fps]]]]]]]] | keyword[def] identifier[frames_to_ms] ( identifier[frames] , identifier[fps] ):
literal[string]
keyword[if] identifier[fps] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[fps] )
keyword[return] identifier[int] ( identifier[round] ( identifier[frames] *( literal[int] / identifier[fps] ))) | def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError('Framerate must be positive number (%f).' % fps) # depends on [control=['if'], data=['fps']]
return int(round(frames * (1000 / fps))) |
def running(name,
restart=False,
update=False,
user=None,
conf_file=None,
bin_env=None,
**kwargs):
'''
Ensure the named service is running.
name
Service name as defined in the supervisor configuration file
restart
Whether to force a restart
update
Whether to update the supervisor configuration.
user
Name of the user to run the supervisorctl command
.. versionadded:: 0.17.0
conf_file
path to supervisorctl config file
bin_env
path to supervisorctl bin or path to virtualenv with supervisor
installed
'''
if name.endswith(':*'):
name = name[:-1]
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if 'supervisord.status' not in __salt__:
ret['result'] = False
ret['comment'] = 'Supervisord module not activated. Do you need to install supervisord?'
return ret
all_processes = __salt__['supervisord.status'](
user=user,
conf_file=conf_file,
bin_env=bin_env
)
# parse process groups
process_groups = set()
for proc in all_processes:
if ':' in proc:
process_groups.add(proc[:proc.index(':') + 1])
process_groups = sorted(process_groups)
matches = {}
if name in all_processes:
matches[name] = (all_processes[name]['state'].lower() == 'running')
elif name in process_groups:
for process in (x for x in all_processes if x.startswith(name)):
matches[process] = (
all_processes[process]['state'].lower() == 'running'
)
to_add = not bool(matches)
if __opts__['test']:
if not to_add:
# Process/group already present, check if any need to be started
to_start = [x for x, y in six.iteritems(matches) if y is False]
if to_start:
ret['result'] = None
if name.endswith(':'):
# Process group
if len(to_start) == len(matches):
ret['comment'] = (
'All services in group \'{0}\' will be started'
.format(name)
)
else:
ret['comment'] = (
'The following services will be started: {0}'
.format(' '.join(to_start))
)
else:
# Single program
ret['comment'] = 'Service {0} will be started'.format(name)
else:
if name.endswith(':'):
# Process group
ret['comment'] = (
'All services in group \'{0}\' are already running'
.format(name)
)
else:
ret['comment'] = ('Service {0} is already running'
.format(name))
else:
ret['result'] = None
# Process/group needs to be added
if name.endswith(':'):
_type = 'Group \'{0}\''.format(name)
else:
_type = 'Service {0}'.format(name)
ret['comment'] = '{0} will be added and started'.format(_type)
return ret
changes = []
just_updated = False
if update:
# If the state explicitly asks to update, we don't care if the process
# is being added or not, since it'll take care of this for us,
# so give this condition priority in order
#
# That is, unless `to_add` somehow manages to contain processes
# we don't want running, in which case adding them may be a mistake
comment = 'Updating supervisor'
result = __salt__['supervisord.update'](
user=user,
conf_file=conf_file,
bin_env=bin_env
)
ret.update(_check_error(result, comment))
log.debug(comment)
if '{0}: updated'.format(name) in result:
just_updated = True
elif to_add:
# Not sure if this condition is precise enough.
comment = 'Adding service: {0}'.format(name)
__salt__['supervisord.reread'](
user=user,
conf_file=conf_file,
bin_env=bin_env
)
# Causes supervisorctl to throw `ERROR: process group already active`
# if process group exists. At this moment, I'm not sure how to handle
# this outside of grepping out the expected string in `_check_error`.
result = __salt__['supervisord.add'](
name,
user=user,
conf_file=conf_file,
bin_env=bin_env
)
ret.update(_check_error(result, comment))
changes.append(comment)
log.debug(comment)
is_stopped = None
process_type = None
if name in process_groups:
process_type = 'group'
# check if any processes in this group are stopped
is_stopped = False
for proc in all_processes:
if proc.startswith(name) \
and _is_stopped_state(all_processes[proc]['state']):
is_stopped = True
break
elif name in all_processes:
process_type = 'service'
if _is_stopped_state(all_processes[name]['state']):
is_stopped = True
else:
is_stopped = False
if is_stopped is False:
if restart and not just_updated:
comment = 'Restarting{0}: {1}'.format(
process_type is not None and ' {0}'.format(process_type) or '',
name
)
log.debug(comment)
result = __salt__['supervisord.restart'](
name,
user=user,
conf_file=conf_file,
bin_env=bin_env
)
ret.update(_check_error(result, comment))
changes.append(comment)
elif just_updated:
comment = 'Not starting updated{0}: {1}'.format(
process_type is not None and ' {0}'.format(process_type) or '',
name
)
result = comment
ret.update({'comment': comment})
else:
comment = 'Not starting already running{0}: {1}'.format(
process_type is not None and ' {0}'.format(process_type) or '',
name
)
result = comment
ret.update({'comment': comment})
elif not just_updated:
comment = 'Starting{0}: {1}'.format(
process_type is not None and ' {0}'.format(process_type) or '',
name
)
changes.append(comment)
log.debug(comment)
result = __salt__['supervisord.start'](
name,
user=user,
conf_file=conf_file,
bin_env=bin_env
)
ret.update(_check_error(result, comment))
log.debug(six.text_type(result))
if ret['result'] and changes:
ret['changes'][name] = ' '.join(changes)
return ret | def function[running, parameter[name, restart, update, user, conf_file, bin_env]]:
constant[
Ensure the named service is running.
name
Service name as defined in the supervisor configuration file
restart
Whether to force a restart
update
Whether to update the supervisor configuration.
user
Name of the user to run the supervisorctl command
.. versionadded:: 0.17.0
conf_file
path to supervisorctl config file
bin_env
path to supervisorctl bin or path to virtualenv with supervisor
installed
]
if call[name[name].endswith, parameter[constant[:*]]] begin[:]
variable[name] assign[=] call[name[name]][<ast.Slice object at 0x7da1b1c37a60>]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c37940>, <ast.Constant object at 0x7da1b1c37910>, <ast.Constant object at 0x7da1b1c378e0>, <ast.Constant object at 0x7da1b1c378b0>], [<ast.Name object at 0x7da1b1c37880>, <ast.Constant object at 0x7da1b1c37850>, <ast.Constant object at 0x7da1b1c37820>, <ast.Dict object at 0x7da1b1c377f0>]]
if compare[constant[supervisord.status] <ast.NotIn object at 0x7da2590d7190> name[__salt__]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] constant[Supervisord module not activated. Do you need to install supervisord?]
return[name[ret]]
variable[all_processes] assign[=] call[call[name[__salt__]][constant[supervisord.status]], parameter[]]
variable[process_groups] assign[=] call[name[set], parameter[]]
for taget[name[proc]] in starred[name[all_processes]] begin[:]
if compare[constant[:] in name[proc]] begin[:]
call[name[process_groups].add, parameter[call[name[proc]][<ast.Slice object at 0x7da1b1c36f20>]]]
variable[process_groups] assign[=] call[name[sorted], parameter[name[process_groups]]]
variable[matches] assign[=] dictionary[[], []]
if compare[name[name] in name[all_processes]] begin[:]
call[name[matches]][name[name]] assign[=] compare[call[call[call[name[all_processes]][name[name]]][constant[state]].lower, parameter[]] equal[==] constant[running]]
variable[to_add] assign[=] <ast.UnaryOp object at 0x7da1b1c36320>
if call[name[__opts__]][constant[test]] begin[:]
if <ast.UnaryOp object at 0x7da1b1c36170> begin[:]
variable[to_start] assign[=] <ast.ListComp object at 0x7da1b1c360b0>
if name[to_start] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
if call[name[name].endswith, parameter[constant[:]]] begin[:]
if compare[call[name[len], parameter[name[to_start]]] equal[==] call[name[len], parameter[name[matches]]]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[All services in group '{0}' will be started].format, parameter[name[name]]]
return[name[ret]]
variable[changes] assign[=] list[[]]
variable[just_updated] assign[=] constant[False]
if name[update] begin[:]
variable[comment] assign[=] constant[Updating supervisor]
variable[result] assign[=] call[call[name[__salt__]][constant[supervisord.update]], parameter[]]
call[name[ret].update, parameter[call[name[_check_error], parameter[name[result], name[comment]]]]]
call[name[log].debug, parameter[name[comment]]]
if compare[call[constant[{0}: updated].format, parameter[name[name]]] in name[result]] begin[:]
variable[just_updated] assign[=] constant[True]
variable[is_stopped] assign[=] constant[None]
variable[process_type] assign[=] constant[None]
if compare[name[name] in name[process_groups]] begin[:]
variable[process_type] assign[=] constant[group]
variable[is_stopped] assign[=] constant[False]
for taget[name[proc]] in starred[name[all_processes]] begin[:]
if <ast.BoolOp object at 0x7da1b1c65960> begin[:]
variable[is_stopped] assign[=] constant[True]
break
if compare[name[is_stopped] is constant[False]] begin[:]
if <ast.BoolOp object at 0x7da1b1c65c00> begin[:]
variable[comment] assign[=] call[constant[Restarting{0}: {1}].format, parameter[<ast.BoolOp object at 0x7da1b1c65e70>, name[name]]]
call[name[log].debug, parameter[name[comment]]]
variable[result] assign[=] call[call[name[__salt__]][constant[supervisord.restart]], parameter[name[name]]]
call[name[ret].update, parameter[call[name[_check_error], parameter[name[result], name[comment]]]]]
call[name[changes].append, parameter[name[comment]]]
if <ast.BoolOp object at 0x7da1b1c64490> begin[:]
call[call[name[ret]][constant[changes]]][name[name]] assign[=] call[constant[ ].join, parameter[name[changes]]]
return[name[ret]] | keyword[def] identifier[running] ( identifier[name] ,
identifier[restart] = keyword[False] ,
identifier[update] = keyword[False] ,
identifier[user] = keyword[None] ,
identifier[conf_file] = keyword[None] ,
identifier[bin_env] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[name] . identifier[endswith] ( literal[string] ):
identifier[name] = identifier[name] [:- literal[int] ]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] :{}}
keyword[if] literal[string] keyword[not] keyword[in] identifier[__salt__] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[all_processes] = identifier[__salt__] [ literal[string] ](
identifier[user] = identifier[user] ,
identifier[conf_file] = identifier[conf_file] ,
identifier[bin_env] = identifier[bin_env]
)
identifier[process_groups] = identifier[set] ()
keyword[for] identifier[proc] keyword[in] identifier[all_processes] :
keyword[if] literal[string] keyword[in] identifier[proc] :
identifier[process_groups] . identifier[add] ( identifier[proc] [: identifier[proc] . identifier[index] ( literal[string] )+ literal[int] ])
identifier[process_groups] = identifier[sorted] ( identifier[process_groups] )
identifier[matches] ={}
keyword[if] identifier[name] keyword[in] identifier[all_processes] :
identifier[matches] [ identifier[name] ]=( identifier[all_processes] [ identifier[name] ][ literal[string] ]. identifier[lower] ()== literal[string] )
keyword[elif] identifier[name] keyword[in] identifier[process_groups] :
keyword[for] identifier[process] keyword[in] ( identifier[x] keyword[for] identifier[x] keyword[in] identifier[all_processes] keyword[if] identifier[x] . identifier[startswith] ( identifier[name] )):
identifier[matches] [ identifier[process] ]=(
identifier[all_processes] [ identifier[process] ][ literal[string] ]. identifier[lower] ()== literal[string]
)
identifier[to_add] = keyword[not] identifier[bool] ( identifier[matches] )
keyword[if] identifier[__opts__] [ literal[string] ]:
keyword[if] keyword[not] identifier[to_add] :
identifier[to_start] =[ identifier[x] keyword[for] identifier[x] , identifier[y] keyword[in] identifier[six] . identifier[iteritems] ( identifier[matches] ) keyword[if] identifier[y] keyword[is] keyword[False] ]
keyword[if] identifier[to_start] :
identifier[ret] [ literal[string] ]= keyword[None]
keyword[if] identifier[name] . identifier[endswith] ( literal[string] ):
keyword[if] identifier[len] ( identifier[to_start] )== identifier[len] ( identifier[matches] ):
identifier[ret] [ literal[string] ]=(
literal[string]
. identifier[format] ( identifier[name] )
)
keyword[else] :
identifier[ret] [ literal[string] ]=(
literal[string]
. identifier[format] ( literal[string] . identifier[join] ( identifier[to_start] ))
)
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[else] :
keyword[if] identifier[name] . identifier[endswith] ( literal[string] ):
identifier[ret] [ literal[string] ]=(
literal[string]
. identifier[format] ( identifier[name] )
)
keyword[else] :
identifier[ret] [ literal[string] ]=( literal[string]
. identifier[format] ( identifier[name] ))
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[None]
keyword[if] identifier[name] . identifier[endswith] ( literal[string] ):
identifier[_type] = literal[string] . identifier[format] ( identifier[name] )
keyword[else] :
identifier[_type] = literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[_type] )
keyword[return] identifier[ret]
identifier[changes] =[]
identifier[just_updated] = keyword[False]
keyword[if] identifier[update] :
identifier[comment] = literal[string]
identifier[result] = identifier[__salt__] [ literal[string] ](
identifier[user] = identifier[user] ,
identifier[conf_file] = identifier[conf_file] ,
identifier[bin_env] = identifier[bin_env]
)
identifier[ret] . identifier[update] ( identifier[_check_error] ( identifier[result] , identifier[comment] ))
identifier[log] . identifier[debug] ( identifier[comment] )
keyword[if] literal[string] . identifier[format] ( identifier[name] ) keyword[in] identifier[result] :
identifier[just_updated] = keyword[True]
keyword[elif] identifier[to_add] :
identifier[comment] = literal[string] . identifier[format] ( identifier[name] )
identifier[__salt__] [ literal[string] ](
identifier[user] = identifier[user] ,
identifier[conf_file] = identifier[conf_file] ,
identifier[bin_env] = identifier[bin_env]
)
identifier[result] = identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[user] = identifier[user] ,
identifier[conf_file] = identifier[conf_file] ,
identifier[bin_env] = identifier[bin_env]
)
identifier[ret] . identifier[update] ( identifier[_check_error] ( identifier[result] , identifier[comment] ))
identifier[changes] . identifier[append] ( identifier[comment] )
identifier[log] . identifier[debug] ( identifier[comment] )
identifier[is_stopped] = keyword[None]
identifier[process_type] = keyword[None]
keyword[if] identifier[name] keyword[in] identifier[process_groups] :
identifier[process_type] = literal[string]
identifier[is_stopped] = keyword[False]
keyword[for] identifier[proc] keyword[in] identifier[all_processes] :
keyword[if] identifier[proc] . identifier[startswith] ( identifier[name] ) keyword[and] identifier[_is_stopped_state] ( identifier[all_processes] [ identifier[proc] ][ literal[string] ]):
identifier[is_stopped] = keyword[True]
keyword[break]
keyword[elif] identifier[name] keyword[in] identifier[all_processes] :
identifier[process_type] = literal[string]
keyword[if] identifier[_is_stopped_state] ( identifier[all_processes] [ identifier[name] ][ literal[string] ]):
identifier[is_stopped] = keyword[True]
keyword[else] :
identifier[is_stopped] = keyword[False]
keyword[if] identifier[is_stopped] keyword[is] keyword[False] :
keyword[if] identifier[restart] keyword[and] keyword[not] identifier[just_updated] :
identifier[comment] = literal[string] . identifier[format] (
identifier[process_type] keyword[is] keyword[not] keyword[None] keyword[and] literal[string] . identifier[format] ( identifier[process_type] ) keyword[or] literal[string] ,
identifier[name]
)
identifier[log] . identifier[debug] ( identifier[comment] )
identifier[result] = identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[user] = identifier[user] ,
identifier[conf_file] = identifier[conf_file] ,
identifier[bin_env] = identifier[bin_env]
)
identifier[ret] . identifier[update] ( identifier[_check_error] ( identifier[result] , identifier[comment] ))
identifier[changes] . identifier[append] ( identifier[comment] )
keyword[elif] identifier[just_updated] :
identifier[comment] = literal[string] . identifier[format] (
identifier[process_type] keyword[is] keyword[not] keyword[None] keyword[and] literal[string] . identifier[format] ( identifier[process_type] ) keyword[or] literal[string] ,
identifier[name]
)
identifier[result] = identifier[comment]
identifier[ret] . identifier[update] ({ literal[string] : identifier[comment] })
keyword[else] :
identifier[comment] = literal[string] . identifier[format] (
identifier[process_type] keyword[is] keyword[not] keyword[None] keyword[and] literal[string] . identifier[format] ( identifier[process_type] ) keyword[or] literal[string] ,
identifier[name]
)
identifier[result] = identifier[comment]
identifier[ret] . identifier[update] ({ literal[string] : identifier[comment] })
keyword[elif] keyword[not] identifier[just_updated] :
identifier[comment] = literal[string] . identifier[format] (
identifier[process_type] keyword[is] keyword[not] keyword[None] keyword[and] literal[string] . identifier[format] ( identifier[process_type] ) keyword[or] literal[string] ,
identifier[name]
)
identifier[changes] . identifier[append] ( identifier[comment] )
identifier[log] . identifier[debug] ( identifier[comment] )
identifier[result] = identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[user] = identifier[user] ,
identifier[conf_file] = identifier[conf_file] ,
identifier[bin_env] = identifier[bin_env]
)
identifier[ret] . identifier[update] ( identifier[_check_error] ( identifier[result] , identifier[comment] ))
identifier[log] . identifier[debug] ( identifier[six] . identifier[text_type] ( identifier[result] ))
keyword[if] identifier[ret] [ literal[string] ] keyword[and] identifier[changes] :
identifier[ret] [ literal[string] ][ identifier[name] ]= literal[string] . identifier[join] ( identifier[changes] )
keyword[return] identifier[ret] | def running(name, restart=False, update=False, user=None, conf_file=None, bin_env=None, **kwargs):
"""
Ensure the named service is running.
name
Service name as defined in the supervisor configuration file
restart
Whether to force a restart
update
Whether to update the supervisor configuration.
user
Name of the user to run the supervisorctl command
.. versionadded:: 0.17.0
conf_file
path to supervisorctl config file
bin_env
path to supervisorctl bin or path to virtualenv with supervisor
installed
"""
if name.endswith(':*'):
name = name[:-1] # depends on [control=['if'], data=[]]
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if 'supervisord.status' not in __salt__:
ret['result'] = False
ret['comment'] = 'Supervisord module not activated. Do you need to install supervisord?'
return ret # depends on [control=['if'], data=[]]
all_processes = __salt__['supervisord.status'](user=user, conf_file=conf_file, bin_env=bin_env)
# parse process groups
process_groups = set()
for proc in all_processes:
if ':' in proc:
process_groups.add(proc[:proc.index(':') + 1]) # depends on [control=['if'], data=['proc']] # depends on [control=['for'], data=['proc']]
process_groups = sorted(process_groups)
matches = {}
if name in all_processes:
matches[name] = all_processes[name]['state'].lower() == 'running' # depends on [control=['if'], data=['name', 'all_processes']]
elif name in process_groups:
for process in (x for x in all_processes if x.startswith(name)):
matches[process] = all_processes[process]['state'].lower() == 'running' # depends on [control=['for'], data=['process']] # depends on [control=['if'], data=['name']]
to_add = not bool(matches)
if __opts__['test']:
if not to_add:
# Process/group already present, check if any need to be started
to_start = [x for (x, y) in six.iteritems(matches) if y is False]
if to_start:
ret['result'] = None
if name.endswith(':'):
# Process group
if len(to_start) == len(matches):
ret['comment'] = "All services in group '{0}' will be started".format(name) # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'The following services will be started: {0}'.format(' '.join(to_start)) # depends on [control=['if'], data=[]]
else:
# Single program
ret['comment'] = 'Service {0} will be started'.format(name) # depends on [control=['if'], data=[]]
elif name.endswith(':'):
# Process group
ret['comment'] = "All services in group '{0}' are already running".format(name) # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Service {0} is already running'.format(name) # depends on [control=['if'], data=[]]
else:
ret['result'] = None
# Process/group needs to be added
if name.endswith(':'):
_type = "Group '{0}'".format(name) # depends on [control=['if'], data=[]]
else:
_type = 'Service {0}'.format(name)
ret['comment'] = '{0} will be added and started'.format(_type)
return ret # depends on [control=['if'], data=[]]
changes = []
just_updated = False
if update:
# If the state explicitly asks to update, we don't care if the process
# is being added or not, since it'll take care of this for us,
# so give this condition priority in order
#
# That is, unless `to_add` somehow manages to contain processes
# we don't want running, in which case adding them may be a mistake
comment = 'Updating supervisor'
result = __salt__['supervisord.update'](user=user, conf_file=conf_file, bin_env=bin_env)
ret.update(_check_error(result, comment))
log.debug(comment)
if '{0}: updated'.format(name) in result:
just_updated = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif to_add:
# Not sure if this condition is precise enough.
comment = 'Adding service: {0}'.format(name)
__salt__['supervisord.reread'](user=user, conf_file=conf_file, bin_env=bin_env)
# Causes supervisorctl to throw `ERROR: process group already active`
# if process group exists. At this moment, I'm not sure how to handle
# this outside of grepping out the expected string in `_check_error`.
result = __salt__['supervisord.add'](name, user=user, conf_file=conf_file, bin_env=bin_env)
ret.update(_check_error(result, comment))
changes.append(comment)
log.debug(comment) # depends on [control=['if'], data=[]]
is_stopped = None
process_type = None
if name in process_groups:
process_type = 'group'
# check if any processes in this group are stopped
is_stopped = False
for proc in all_processes:
if proc.startswith(name) and _is_stopped_state(all_processes[proc]['state']):
is_stopped = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['proc']] # depends on [control=['if'], data=['name']]
elif name in all_processes:
process_type = 'service'
if _is_stopped_state(all_processes[name]['state']):
is_stopped = True # depends on [control=['if'], data=[]]
else:
is_stopped = False # depends on [control=['if'], data=['name', 'all_processes']]
if is_stopped is False:
if restart and (not just_updated):
comment = 'Restarting{0}: {1}'.format(process_type is not None and ' {0}'.format(process_type) or '', name)
log.debug(comment)
result = __salt__['supervisord.restart'](name, user=user, conf_file=conf_file, bin_env=bin_env)
ret.update(_check_error(result, comment))
changes.append(comment) # depends on [control=['if'], data=[]]
elif just_updated:
comment = 'Not starting updated{0}: {1}'.format(process_type is not None and ' {0}'.format(process_type) or '', name)
result = comment
ret.update({'comment': comment}) # depends on [control=['if'], data=[]]
else:
comment = 'Not starting already running{0}: {1}'.format(process_type is not None and ' {0}'.format(process_type) or '', name)
result = comment
ret.update({'comment': comment}) # depends on [control=['if'], data=[]]
elif not just_updated:
comment = 'Starting{0}: {1}'.format(process_type is not None and ' {0}'.format(process_type) or '', name)
changes.append(comment)
log.debug(comment)
result = __salt__['supervisord.start'](name, user=user, conf_file=conf_file, bin_env=bin_env)
ret.update(_check_error(result, comment))
log.debug(six.text_type(result)) # depends on [control=['if'], data=[]]
if ret['result'] and changes:
ret['changes'][name] = ' '.join(changes) # depends on [control=['if'], data=[]]
return ret |
def find_remote_bundle(self, ref, try_harder=None):
"""
Locate a bundle, by any reference, among the configured remotes. The routine will only look in the cache
directory lists stored in the remotes, which must be updated to be current.
:param vid: A bundle or partition reference, vid, or name
:param try_harder: If the reference isn't found, try parsing for an object id, or subsets of the name
:return: (remote,vname) or (None,None) if the ref is not found
"""
from ambry.identity import ObjectNumber
remote, vid = self._find_remote_bundle(ref)
if remote:
return (remote, vid)
if try_harder:
on = ObjectNumber.parse(vid)
if on:
raise NotImplementedError()
don = on.as_dataset
return self._find_remote_bundle(vid)
# Try subsets of a name, assuming it is a name
parts = ref.split('-')
for i in range(len(parts) - 1, 2, -1):
remote, vid = self._find_remote_bundle('-'.join(parts[:i]))
if remote:
return (remote, vid)
return (None, None) | def function[find_remote_bundle, parameter[self, ref, try_harder]]:
constant[
Locate a bundle, by any reference, among the configured remotes. The routine will only look in the cache
directory lists stored in the remotes, which must be updated to be current.
:param vid: A bundle or partition reference, vid, or name
:param try_harder: If the reference isn't found, try parsing for an object id, or subsets of the name
:return: (remote,vname) or (None,None) if the ref is not found
]
from relative_module[ambry.identity] import module[ObjectNumber]
<ast.Tuple object at 0x7da2043475e0> assign[=] call[name[self]._find_remote_bundle, parameter[name[ref]]]
if name[remote] begin[:]
return[tuple[[<ast.Name object at 0x7da18c4cdcf0>, <ast.Name object at 0x7da18c4cfc10>]]]
if name[try_harder] begin[:]
variable[on] assign[=] call[name[ObjectNumber].parse, parameter[name[vid]]]
if name[on] begin[:]
<ast.Raise object at 0x7da18c4cc310>
variable[don] assign[=] name[on].as_dataset
return[call[name[self]._find_remote_bundle, parameter[name[vid]]]]
variable[parts] assign[=] call[name[ref].split, parameter[constant[-]]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[parts]]] - constant[1]], constant[2], <ast.UnaryOp object at 0x7da18c4cd2d0>]]] begin[:]
<ast.Tuple object at 0x7da18c4ce740> assign[=] call[name[self]._find_remote_bundle, parameter[call[constant[-].join, parameter[call[name[parts]][<ast.Slice object at 0x7da18c4cfa60>]]]]]
if name[remote] begin[:]
return[tuple[[<ast.Name object at 0x7da18c4cd150>, <ast.Name object at 0x7da18c4ce8c0>]]]
return[tuple[[<ast.Constant object at 0x7da18c4cca30>, <ast.Constant object at 0x7da18c4cfbe0>]]] | keyword[def] identifier[find_remote_bundle] ( identifier[self] , identifier[ref] , identifier[try_harder] = keyword[None] ):
literal[string]
keyword[from] identifier[ambry] . identifier[identity] keyword[import] identifier[ObjectNumber]
identifier[remote] , identifier[vid] = identifier[self] . identifier[_find_remote_bundle] ( identifier[ref] )
keyword[if] identifier[remote] :
keyword[return] ( identifier[remote] , identifier[vid] )
keyword[if] identifier[try_harder] :
identifier[on] = identifier[ObjectNumber] . identifier[parse] ( identifier[vid] )
keyword[if] identifier[on] :
keyword[raise] identifier[NotImplementedError] ()
identifier[don] = identifier[on] . identifier[as_dataset]
keyword[return] identifier[self] . identifier[_find_remote_bundle] ( identifier[vid] )
identifier[parts] = identifier[ref] . identifier[split] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[parts] )- literal[int] , literal[int] ,- literal[int] ):
identifier[remote] , identifier[vid] = identifier[self] . identifier[_find_remote_bundle] ( literal[string] . identifier[join] ( identifier[parts] [: identifier[i] ]))
keyword[if] identifier[remote] :
keyword[return] ( identifier[remote] , identifier[vid] )
keyword[return] ( keyword[None] , keyword[None] ) | def find_remote_bundle(self, ref, try_harder=None):
"""
Locate a bundle, by any reference, among the configured remotes. The routine will only look in the cache
directory lists stored in the remotes, which must be updated to be current.
:param vid: A bundle or partition reference, vid, or name
:param try_harder: If the reference isn't found, try parsing for an object id, or subsets of the name
:return: (remote,vname) or (None,None) if the ref is not found
"""
from ambry.identity import ObjectNumber
(remote, vid) = self._find_remote_bundle(ref)
if remote:
return (remote, vid) # depends on [control=['if'], data=[]]
if try_harder:
on = ObjectNumber.parse(vid)
if on:
raise NotImplementedError()
don = on.as_dataset
return self._find_remote_bundle(vid) # depends on [control=['if'], data=[]]
# Try subsets of a name, assuming it is a name
parts = ref.split('-')
for i in range(len(parts) - 1, 2, -1):
(remote, vid) = self._find_remote_bundle('-'.join(parts[:i]))
if remote:
return (remote, vid) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
return (None, None) |
def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
set: leaf items which are reachable from the given set of items
"""
return sorted(set(flatten(self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values()))) | def function[get_all_leaves, parameter[self, item_ids, language, forbidden_item_ids]]:
constant[
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
set: leaf items which are reachable from the given set of items
]
return[call[name[sorted], parameter[call[name[set], parameter[call[name[flatten], parameter[call[call[name[self].get_leaves, parameter[name[item_ids]]].values, parameter[]]]]]]]]] | keyword[def] identifier[get_all_leaves] ( identifier[self] , identifier[item_ids] = keyword[None] , identifier[language] = keyword[None] , identifier[forbidden_item_ids] = keyword[None] ):
literal[string]
keyword[return] identifier[sorted] ( identifier[set] ( identifier[flatten] ( identifier[self] . identifier[get_leaves] ( identifier[item_ids] , identifier[language] = identifier[language] , identifier[forbidden_item_ids] = identifier[forbidden_item_ids] ). identifier[values] ()))) | def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
set: leaf items which are reachable from the given set of items
"""
return sorted(set(flatten(self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values()))) |
def calculate_ecef_velocity(inst):
"""
Calculates spacecraft velocity in ECEF frame.
Presumes that the spacecraft velocity in ECEF is in
the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z)
"""
x = inst['position_ecef_x']
vel_x = (x.values[2:] - x.values[0:-2])/2.
y = inst['position_ecef_y']
vel_y = (y.values[2:] - y.values[0:-2])/2.
z = inst['position_ecef_z']
vel_z = (z.values[2:] - z.values[0:-2])/2.
inst[1:-1, 'velocity_ecef_x'] = vel_x
inst[1:-1, 'velocity_ecef_y'] = vel_y
inst[1:-1, 'velocity_ecef_z'] = vel_z
inst.meta['velocity_ecef_x'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_y'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_z'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
return | def function[calculate_ecef_velocity, parameter[inst]]:
constant[
Calculates spacecraft velocity in ECEF frame.
Presumes that the spacecraft velocity in ECEF is in
the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z)
]
variable[x] assign[=] call[name[inst]][constant[position_ecef_x]]
variable[vel_x] assign[=] binary_operation[binary_operation[call[name[x].values][<ast.Slice object at 0x7da1b10d7310>] - call[name[x].values][<ast.Slice object at 0x7da1b2347ee0>]] / constant[2.0]]
variable[y] assign[=] call[name[inst]][constant[position_ecef_y]]
variable[vel_y] assign[=] binary_operation[binary_operation[call[name[y].values][<ast.Slice object at 0x7da1b2346f20>] - call[name[y].values][<ast.Slice object at 0x7da1b2344a00>]] / constant[2.0]]
variable[z] assign[=] call[name[inst]][constant[position_ecef_z]]
variable[vel_z] assign[=] binary_operation[binary_operation[call[name[z].values][<ast.Slice object at 0x7da1b2344250>] - call[name[z].values][<ast.Slice object at 0x7da1b23443d0>]] / constant[2.0]]
call[name[inst]][tuple[[<ast.Slice object at 0x7da18c4cc100>, <ast.Constant object at 0x7da18c4cc640>]]] assign[=] name[vel_x]
call[name[inst]][tuple[[<ast.Slice object at 0x7da18c4cdba0>, <ast.Constant object at 0x7da18c4ced10>]]] assign[=] name[vel_y]
call[name[inst]][tuple[[<ast.Slice object at 0x7da18c4ce7a0>, <ast.Constant object at 0x7da18c4cccd0>]]] assign[=] name[vel_z]
call[name[inst].meta][constant[velocity_ecef_x]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf1f0>, <ast.Constant object at 0x7da18c4ce920>], [<ast.Constant object at 0x7da18c4cdc90>, <ast.Constant object at 0x7da18c4ce080>]]
call[name[inst].meta][constant[velocity_ecef_y]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cecb0>, <ast.Constant object at 0x7da18ede5c30>], [<ast.Constant object at 0x7da18ede5cf0>, <ast.Constant object at 0x7da18ede4610>]]
call[name[inst].meta][constant[velocity_ecef_z]] assign[=] dictionary[[<ast.Constant object at 0x7da18f58ceb0>, <ast.Constant object at 0x7da18f58ee60>], [<ast.Constant object at 0x7da18f58e230>, <ast.Constant object at 0x7da18f58fb50>]]
return[None] | keyword[def] identifier[calculate_ecef_velocity] ( identifier[inst] ):
literal[string]
identifier[x] = identifier[inst] [ literal[string] ]
identifier[vel_x] =( identifier[x] . identifier[values] [ literal[int] :]- identifier[x] . identifier[values] [ literal[int] :- literal[int] ])/ literal[int]
identifier[y] = identifier[inst] [ literal[string] ]
identifier[vel_y] =( identifier[y] . identifier[values] [ literal[int] :]- identifier[y] . identifier[values] [ literal[int] :- literal[int] ])/ literal[int]
identifier[z] = identifier[inst] [ literal[string] ]
identifier[vel_z] =( identifier[z] . identifier[values] [ literal[int] :]- identifier[z] . identifier[values] [ literal[int] :- literal[int] ])/ literal[int]
identifier[inst] [ literal[int] :- literal[int] , literal[string] ]= identifier[vel_x]
identifier[inst] [ literal[int] :- literal[int] , literal[string] ]= identifier[vel_y]
identifier[inst] [ literal[int] :- literal[int] , literal[string] ]= identifier[vel_z]
identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] ,
literal[string] : literal[string] }
identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] ,
literal[string] : literal[string] }
identifier[inst] . identifier[meta] [ literal[string] ]={ literal[string] : literal[string] ,
literal[string] : literal[string] }
keyword[return] | def calculate_ecef_velocity(inst):
"""
Calculates spacecraft velocity in ECEF frame.
Presumes that the spacecraft velocity in ECEF is in
the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z)
"""
x = inst['position_ecef_x']
vel_x = (x.values[2:] - x.values[0:-2]) / 2.0
y = inst['position_ecef_y']
vel_y = (y.values[2:] - y.values[0:-2]) / 2.0
z = inst['position_ecef_z']
vel_z = (z.values[2:] - z.values[0:-2]) / 2.0
inst[1:-1, 'velocity_ecef_x'] = vel_x
inst[1:-1, 'velocity_ecef_y'] = vel_y
inst[1:-1, 'velocity_ecef_z'] = vel_z
inst.meta['velocity_ecef_x'] = {'units': 'km/s', 'desc': 'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_y'] = {'units': 'km/s', 'desc': 'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_z'] = {'units': 'km/s', 'desc': 'Velocity of satellite calculated with respect to ECEF frame.'}
return |
def format_cmd_output(cmd, output, name):
"""format command output for docs"""
formatted = '.. code-block:: console\n\n'
formatted += ' (venv)$ {c}\n'.format(c=cmd)
lines = output.split("\n")
if name != 'help':
for idx, line in enumerate(lines):
if len(line) > 100:
lines[idx] = line[:100] + ' (...)'
if len(lines) > 12:
tmp_lines = lines[:5] + ['(...)'] + lines[-5:]
if ' -l' in cmd or ' --list-defaults' in cmd:
# find a line that uses a limit from the API,
# and a line with None (unlimited)
api_line = None
none_line = None
for line in lines:
if '(API)' in line:
api_line = line
break
for line in lines:
if line.strip().endswith('None'):
none_line = line
break
tmp_lines = lines[:5]
if api_line not in tmp_lines and api_line is not None:
tmp_lines = tmp_lines + ['(...)'] + [api_line]
if none_line not in tmp_lines and none_line is not None:
tmp_lines = tmp_lines + ['(...)'] + [none_line]
tmp_lines = tmp_lines + ['(...)'] + lines[-5:]
lines = tmp_lines
for line in lines:
if line.strip() == '':
continue
formatted += ' ' + line + "\n"
formatted += '\n'
return formatted | def function[format_cmd_output, parameter[cmd, output, name]]:
constant[format command output for docs]
variable[formatted] assign[=] constant[.. code-block:: console
]
<ast.AugAssign object at 0x7da2045676a0>
variable[lines] assign[=] call[name[output].split, parameter[constant[
]]]
if compare[name[name] not_equal[!=] constant[help]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da204566f20>, <ast.Name object at 0x7da204565810>]]] in starred[call[name[enumerate], parameter[name[lines]]]] begin[:]
if compare[call[name[len], parameter[name[line]]] greater[>] constant[100]] begin[:]
call[name[lines]][name[idx]] assign[=] binary_operation[call[name[line]][<ast.Slice object at 0x7da204566fb0>] + constant[ (...)]]
if compare[call[name[len], parameter[name[lines]]] greater[>] constant[12]] begin[:]
variable[tmp_lines] assign[=] binary_operation[binary_operation[call[name[lines]][<ast.Slice object at 0x7da1b1a65c90>] + list[[<ast.Constant object at 0x7da1b1a67b20>]]] + call[name[lines]][<ast.Slice object at 0x7da1b1a67c10>]]
if <ast.BoolOp object at 0x7da1b1a65060> begin[:]
variable[api_line] assign[=] constant[None]
variable[none_line] assign[=] constant[None]
for taget[name[line]] in starred[name[lines]] begin[:]
if compare[constant[(API)] in name[line]] begin[:]
variable[api_line] assign[=] name[line]
break
for taget[name[line]] in starred[name[lines]] begin[:]
if call[call[name[line].strip, parameter[]].endswith, parameter[constant[None]]] begin[:]
variable[none_line] assign[=] name[line]
break
variable[tmp_lines] assign[=] call[name[lines]][<ast.Slice object at 0x7da20c6aac50>]
if <ast.BoolOp object at 0x7da20c6ab7c0> begin[:]
variable[tmp_lines] assign[=] binary_operation[binary_operation[name[tmp_lines] + list[[<ast.Constant object at 0x7da20c6a9000>]]] + list[[<ast.Name object at 0x7da20c6aaa70>]]]
if <ast.BoolOp object at 0x7da20c6aaa10> begin[:]
variable[tmp_lines] assign[=] binary_operation[binary_operation[name[tmp_lines] + list[[<ast.Constant object at 0x7da20c6aacb0>]]] + list[[<ast.Name object at 0x7da18f00d4b0>]]]
variable[tmp_lines] assign[=] binary_operation[binary_operation[name[tmp_lines] + list[[<ast.Constant object at 0x7da18f00f580>]]] + call[name[lines]][<ast.Slice object at 0x7da18f00f640>]]
variable[lines] assign[=] name[tmp_lines]
for taget[name[line]] in starred[name[lines]] begin[:]
if compare[call[name[line].strip, parameter[]] equal[==] constant[]] begin[:]
continue
<ast.AugAssign object at 0x7da18f00f550>
<ast.AugAssign object at 0x7da18f00fb50>
return[name[formatted]] | keyword[def] identifier[format_cmd_output] ( identifier[cmd] , identifier[output] , identifier[name] ):
literal[string]
identifier[formatted] = literal[string]
identifier[formatted] += literal[string] . identifier[format] ( identifier[c] = identifier[cmd] )
identifier[lines] = identifier[output] . identifier[split] ( literal[string] )
keyword[if] identifier[name] != literal[string] :
keyword[for] identifier[idx] , identifier[line] keyword[in] identifier[enumerate] ( identifier[lines] ):
keyword[if] identifier[len] ( identifier[line] )> literal[int] :
identifier[lines] [ identifier[idx] ]= identifier[line] [: literal[int] ]+ literal[string]
keyword[if] identifier[len] ( identifier[lines] )> literal[int] :
identifier[tmp_lines] = identifier[lines] [: literal[int] ]+[ literal[string] ]+ identifier[lines] [- literal[int] :]
keyword[if] literal[string] keyword[in] identifier[cmd] keyword[or] literal[string] keyword[in] identifier[cmd] :
identifier[api_line] = keyword[None]
identifier[none_line] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[api_line] = identifier[line]
keyword[break]
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] identifier[line] . identifier[strip] (). identifier[endswith] ( literal[string] ):
identifier[none_line] = identifier[line]
keyword[break]
identifier[tmp_lines] = identifier[lines] [: literal[int] ]
keyword[if] identifier[api_line] keyword[not] keyword[in] identifier[tmp_lines] keyword[and] identifier[api_line] keyword[is] keyword[not] keyword[None] :
identifier[tmp_lines] = identifier[tmp_lines] +[ literal[string] ]+[ identifier[api_line] ]
keyword[if] identifier[none_line] keyword[not] keyword[in] identifier[tmp_lines] keyword[and] identifier[none_line] keyword[is] keyword[not] keyword[None] :
identifier[tmp_lines] = identifier[tmp_lines] +[ literal[string] ]+[ identifier[none_line] ]
identifier[tmp_lines] = identifier[tmp_lines] +[ literal[string] ]+ identifier[lines] [- literal[int] :]
identifier[lines] = identifier[tmp_lines]
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] identifier[line] . identifier[strip] ()== literal[string] :
keyword[continue]
identifier[formatted] += literal[string] + identifier[line] + literal[string]
identifier[formatted] += literal[string]
keyword[return] identifier[formatted] | def format_cmd_output(cmd, output, name):
"""format command output for docs"""
formatted = '.. code-block:: console\n\n'
formatted += ' (venv)$ {c}\n'.format(c=cmd)
lines = output.split('\n')
if name != 'help':
for (idx, line) in enumerate(lines):
if len(line) > 100:
lines[idx] = line[:100] + ' (...)' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(lines) > 12:
tmp_lines = lines[:5] + ['(...)'] + lines[-5:]
if ' -l' in cmd or ' --list-defaults' in cmd:
# find a line that uses a limit from the API,
# and a line with None (unlimited)
api_line = None
none_line = None
for line in lines:
if '(API)' in line:
api_line = line
break # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']]
for line in lines:
if line.strip().endswith('None'):
none_line = line
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
tmp_lines = lines[:5]
if api_line not in tmp_lines and api_line is not None:
tmp_lines = tmp_lines + ['(...)'] + [api_line] # depends on [control=['if'], data=[]]
if none_line not in tmp_lines and none_line is not None:
tmp_lines = tmp_lines + ['(...)'] + [none_line] # depends on [control=['if'], data=[]]
tmp_lines = tmp_lines + ['(...)'] + lines[-5:] # depends on [control=['if'], data=[]]
lines = tmp_lines # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
for line in lines:
if line.strip() == '':
continue # depends on [control=['if'], data=[]]
formatted += ' ' + line + '\n' # depends on [control=['for'], data=['line']]
formatted += '\n'
return formatted |
def account_setup(remote, token, resp):
"""Perform additional setup after user have been logged in."""
resource = get_resource(remote)
with db.session.begin_nested():
person_id = resource.get('PersonID', [None])
external_id = resource.get('uidNumber', person_id)[0]
# Set CERN person ID in extra_data.
token.remote_account.extra_data = {
'external_id': external_id,
}
groups = account_groups_and_extra_data(token.remote_account, resource)
assert not isinstance(g.identity, AnonymousIdentity)
extend_identity(g.identity, groups)
user = token.remote_account.user
# Create user <-> external id link.
oauth_link_external_id(user, dict(id=external_id, method='cern')) | def function[account_setup, parameter[remote, token, resp]]:
constant[Perform additional setup after user have been logged in.]
variable[resource] assign[=] call[name[get_resource], parameter[name[remote]]]
with call[name[db].session.begin_nested, parameter[]] begin[:]
variable[person_id] assign[=] call[name[resource].get, parameter[constant[PersonID], list[[<ast.Constant object at 0x7da1b2518280>]]]]
variable[external_id] assign[=] call[call[name[resource].get, parameter[constant[uidNumber], name[person_id]]]][constant[0]]
name[token].remote_account.extra_data assign[=] dictionary[[<ast.Constant object at 0x7da1b257f2b0>], [<ast.Name object at 0x7da1b257e440>]]
variable[groups] assign[=] call[name[account_groups_and_extra_data], parameter[name[token].remote_account, name[resource]]]
assert[<ast.UnaryOp object at 0x7da1b257e920>]
call[name[extend_identity], parameter[name[g].identity, name[groups]]]
variable[user] assign[=] name[token].remote_account.user
call[name[oauth_link_external_id], parameter[name[user], call[name[dict], parameter[]]]] | keyword[def] identifier[account_setup] ( identifier[remote] , identifier[token] , identifier[resp] ):
literal[string]
identifier[resource] = identifier[get_resource] ( identifier[remote] )
keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] ():
identifier[person_id] = identifier[resource] . identifier[get] ( literal[string] ,[ keyword[None] ])
identifier[external_id] = identifier[resource] . identifier[get] ( literal[string] , identifier[person_id] )[ literal[int] ]
identifier[token] . identifier[remote_account] . identifier[extra_data] ={
literal[string] : identifier[external_id] ,
}
identifier[groups] = identifier[account_groups_and_extra_data] ( identifier[token] . identifier[remote_account] , identifier[resource] )
keyword[assert] keyword[not] identifier[isinstance] ( identifier[g] . identifier[identity] , identifier[AnonymousIdentity] )
identifier[extend_identity] ( identifier[g] . identifier[identity] , identifier[groups] )
identifier[user] = identifier[token] . identifier[remote_account] . identifier[user]
identifier[oauth_link_external_id] ( identifier[user] , identifier[dict] ( identifier[id] = identifier[external_id] , identifier[method] = literal[string] )) | def account_setup(remote, token, resp):
"""Perform additional setup after user have been logged in."""
resource = get_resource(remote)
with db.session.begin_nested():
person_id = resource.get('PersonID', [None])
external_id = resource.get('uidNumber', person_id)[0]
# Set CERN person ID in extra_data.
token.remote_account.extra_data = {'external_id': external_id}
groups = account_groups_and_extra_data(token.remote_account, resource)
assert not isinstance(g.identity, AnonymousIdentity)
extend_identity(g.identity, groups)
user = token.remote_account.user
# Create user <-> external id link.
oauth_link_external_id(user, dict(id=external_id, method='cern')) # depends on [control=['with'], data=[]] |
def warn(self,message):
"""write a warning to the log file.
Parameters
----------
message : str
the warning text
"""
s = str(datetime.now()) + " WARNING: " + message + '\n'
if self.echo:
print(s,end='')
if self.filename:
self.f.write(s)
self.f.flush
warnings.warn(s,PyemuWarning) | def function[warn, parameter[self, message]]:
constant[write a warning to the log file.
Parameters
----------
message : str
the warning text
]
variable[s] assign[=] binary_operation[binary_operation[binary_operation[call[name[str], parameter[call[name[datetime].now, parameter[]]]] + constant[ WARNING: ]] + name[message]] + constant[
]]
if name[self].echo begin[:]
call[name[print], parameter[name[s]]]
if name[self].filename begin[:]
call[name[self].f.write, parameter[name[s]]]
name[self].f.flush
call[name[warnings].warn, parameter[name[s], name[PyemuWarning]]] | keyword[def] identifier[warn] ( identifier[self] , identifier[message] ):
literal[string]
identifier[s] = identifier[str] ( identifier[datetime] . identifier[now] ())+ literal[string] + identifier[message] + literal[string]
keyword[if] identifier[self] . identifier[echo] :
identifier[print] ( identifier[s] , identifier[end] = literal[string] )
keyword[if] identifier[self] . identifier[filename] :
identifier[self] . identifier[f] . identifier[write] ( identifier[s] )
identifier[self] . identifier[f] . identifier[flush]
identifier[warnings] . identifier[warn] ( identifier[s] , identifier[PyemuWarning] ) | def warn(self, message):
"""write a warning to the log file.
Parameters
----------
message : str
the warning text
"""
s = str(datetime.now()) + ' WARNING: ' + message + '\n'
if self.echo:
print(s, end='') # depends on [control=['if'], data=[]]
if self.filename:
self.f.write(s)
self.f.flush # depends on [control=['if'], data=[]]
warnings.warn(s, PyemuWarning) |
def normalize_yaml(yaml):
"""Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
"""
if isinstance(yaml, list):
# Normalize the roles YAML data
normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))
for x in yaml]
else:
# Extract the project names from the roles YAML and create a list of
# tuples.
projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]
normalized_yaml = []
for project in projects:
repo_url = yaml['{0}_git_repo'.format(project)]
commit_sha = yaml['{0}_git_install_branch'.format(project)]
normalized_yaml.append((project, repo_url, commit_sha))
return normalized_yaml | def function[normalize_yaml, parameter[yaml]]:
constant[Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
]
if call[name[isinstance], parameter[name[yaml], name[list]]] begin[:]
variable[normalized_yaml] assign[=] <ast.ListComp object at 0x7da2041d9ae0>
return[name[normalized_yaml]] | keyword[def] identifier[normalize_yaml] ( identifier[yaml] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[yaml] , identifier[list] ):
identifier[normalized_yaml] =[( identifier[x] [ literal[string] ], identifier[x] [ literal[string] ], identifier[x] . identifier[get] ( literal[string] , literal[string] ))
keyword[for] identifier[x] keyword[in] identifier[yaml] ]
keyword[else] :
identifier[projects] =[ identifier[x] [:- literal[int] ] keyword[for] identifier[x] keyword[in] identifier[yaml] . identifier[keys] () keyword[if] identifier[x] . identifier[endswith] ( literal[string] )]
identifier[normalized_yaml] =[]
keyword[for] identifier[project] keyword[in] identifier[projects] :
identifier[repo_url] = identifier[yaml] [ literal[string] . identifier[format] ( identifier[project] )]
identifier[commit_sha] = identifier[yaml] [ literal[string] . identifier[format] ( identifier[project] )]
identifier[normalized_yaml] . identifier[append] (( identifier[project] , identifier[repo_url] , identifier[commit_sha] ))
keyword[return] identifier[normalized_yaml] | def normalize_yaml(yaml):
"""Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
"""
if isinstance(yaml, list):
# Normalize the roles YAML data
normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD')) for x in yaml] # depends on [control=['if'], data=[]]
else:
# Extract the project names from the roles YAML and create a list of
# tuples.
projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]
normalized_yaml = []
for project in projects:
repo_url = yaml['{0}_git_repo'.format(project)]
commit_sha = yaml['{0}_git_install_branch'.format(project)]
normalized_yaml.append((project, repo_url, commit_sha)) # depends on [control=['for'], data=['project']]
return normalized_yaml |
def set_replication(self, path, replication):
r"""
Set the replication of ``path`` to ``replication``\ .
:type path: str
:param path: the path of the file
:type replication: int
:param replication: the replication value
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.set_replication(path, replication) | def function[set_replication, parameter[self, path, replication]]:
constant[
Set the replication of ``path`` to ``replication``\ .
:type path: str
:param path: the path of the file
:type replication: int
:param replication: the replication value
:raises: :exc:`~exceptions.IOError`
]
call[name[_complain_ifclosed], parameter[name[self].closed]]
return[call[name[self].fs.set_replication, parameter[name[path], name[replication]]]] | keyword[def] identifier[set_replication] ( identifier[self] , identifier[path] , identifier[replication] ):
literal[string]
identifier[_complain_ifclosed] ( identifier[self] . identifier[closed] )
keyword[return] identifier[self] . identifier[fs] . identifier[set_replication] ( identifier[path] , identifier[replication] ) | def set_replication(self, path, replication):
"""
Set the replication of ``path`` to ``replication``\\ .
:type path: str
:param path: the path of the file
:type replication: int
:param replication: the replication value
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.set_replication(path, replication) |
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained directly from the defining class's
__dict__, not via getattr. This is especially important for
data attributes: C.data is just a data object, but
C.__dict__['data'] may be a data descriptor with additional
info, like a __doc__ string.
"""
mro = getmro(cls)
names = dir(cls)
result = []
for name in names:
# Get the object associated with the name.
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
if name in cls.__dict__:
obj = cls.__dict__[name]
else:
obj = getattr(cls, name)
# Figure out where it was defined.
homecls = getattr(obj, "__objclass__", None)
if homecls is None:
# search the dicts.
for base in mro:
if name in base.__dict__:
homecls = base
break
# Get the object again, in order to get it from the defining
# __dict__ instead of via getattr (if possible).
if homecls is not None and name in homecls.__dict__:
obj = homecls.__dict__[name]
# Also get the object via getattr.
obj_via_getattr = getattr(cls, name)
# Classify the object.
if isinstance(obj, staticmethod):
kind = "static method"
elif isinstance(obj, classmethod):
kind = "class method"
elif isinstance(obj, property):
kind = "property"
elif (ismethod(obj_via_getattr) or
ismethoddescriptor(obj_via_getattr)):
kind = "method"
else:
kind = "data"
result.append((name, kind, homecls, obj))
return result | def function[classify_class_attrs, parameter[cls]]:
constant[Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained directly from the defining class's
__dict__, not via getattr. This is especially important for
data attributes: C.data is just a data object, but
C.__dict__['data'] may be a data descriptor with additional
info, like a __doc__ string.
]
variable[mro] assign[=] call[name[getmro], parameter[name[cls]]]
variable[names] assign[=] call[name[dir], parameter[name[cls]]]
variable[result] assign[=] list[[]]
for taget[name[name]] in starred[name[names]] begin[:]
if compare[name[name] in name[cls].__dict__] begin[:]
variable[obj] assign[=] call[name[cls].__dict__][name[name]]
variable[homecls] assign[=] call[name[getattr], parameter[name[obj], constant[__objclass__], constant[None]]]
if compare[name[homecls] is constant[None]] begin[:]
for taget[name[base]] in starred[name[mro]] begin[:]
if compare[name[name] in name[base].__dict__] begin[:]
variable[homecls] assign[=] name[base]
break
if <ast.BoolOp object at 0x7da1b088d5a0> begin[:]
variable[obj] assign[=] call[name[homecls].__dict__][name[name]]
variable[obj_via_getattr] assign[=] call[name[getattr], parameter[name[cls], name[name]]]
if call[name[isinstance], parameter[name[obj], name[staticmethod]]] begin[:]
variable[kind] assign[=] constant[static method]
call[name[result].append, parameter[tuple[[<ast.Name object at 0x7da1b0778550>, <ast.Name object at 0x7da1b0778580>, <ast.Name object at 0x7da1b07785b0>, <ast.Name object at 0x7da1b07785e0>]]]]
return[name[result]] | keyword[def] identifier[classify_class_attrs] ( identifier[cls] ):
literal[string]
identifier[mro] = identifier[getmro] ( identifier[cls] )
identifier[names] = identifier[dir] ( identifier[cls] )
identifier[result] =[]
keyword[for] identifier[name] keyword[in] identifier[names] :
keyword[if] identifier[name] keyword[in] identifier[cls] . identifier[__dict__] :
identifier[obj] = identifier[cls] . identifier[__dict__] [ identifier[name] ]
keyword[else] :
identifier[obj] = identifier[getattr] ( identifier[cls] , identifier[name] )
identifier[homecls] = identifier[getattr] ( identifier[obj] , literal[string] , keyword[None] )
keyword[if] identifier[homecls] keyword[is] keyword[None] :
keyword[for] identifier[base] keyword[in] identifier[mro] :
keyword[if] identifier[name] keyword[in] identifier[base] . identifier[__dict__] :
identifier[homecls] = identifier[base]
keyword[break]
keyword[if] identifier[homecls] keyword[is] keyword[not] keyword[None] keyword[and] identifier[name] keyword[in] identifier[homecls] . identifier[__dict__] :
identifier[obj] = identifier[homecls] . identifier[__dict__] [ identifier[name] ]
identifier[obj_via_getattr] = identifier[getattr] ( identifier[cls] , identifier[name] )
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[staticmethod] ):
identifier[kind] = literal[string]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[classmethod] ):
identifier[kind] = literal[string]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[property] ):
identifier[kind] = literal[string]
keyword[elif] ( identifier[ismethod] ( identifier[obj_via_getattr] ) keyword[or]
identifier[ismethoddescriptor] ( identifier[obj_via_getattr] )):
identifier[kind] = literal[string]
keyword[else] :
identifier[kind] = literal[string]
identifier[result] . identifier[append] (( identifier[name] , identifier[kind] , identifier[homecls] , identifier[obj] ))
keyword[return] identifier[result] | def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained directly from the defining class's
__dict__, not via getattr. This is especially important for
data attributes: C.data is just a data object, but
C.__dict__['data'] may be a data descriptor with additional
info, like a __doc__ string.
"""
mro = getmro(cls)
names = dir(cls)
result = []
for name in names:
# Get the object associated with the name.
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
if name in cls.__dict__:
obj = cls.__dict__[name] # depends on [control=['if'], data=['name']]
else:
obj = getattr(cls, name)
# Figure out where it was defined.
homecls = getattr(obj, '__objclass__', None)
if homecls is None:
# search the dicts.
for base in mro:
if name in base.__dict__:
homecls = base
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['base']] # depends on [control=['if'], data=['homecls']]
# Get the object again, in order to get it from the defining
# __dict__ instead of via getattr (if possible).
if homecls is not None and name in homecls.__dict__:
obj = homecls.__dict__[name] # depends on [control=['if'], data=[]]
# Also get the object via getattr.
obj_via_getattr = getattr(cls, name)
# Classify the object.
if isinstance(obj, staticmethod):
kind = 'static method' # depends on [control=['if'], data=[]]
elif isinstance(obj, classmethod):
kind = 'class method' # depends on [control=['if'], data=[]]
elif isinstance(obj, property):
kind = 'property' # depends on [control=['if'], data=[]]
elif ismethod(obj_via_getattr) or ismethoddescriptor(obj_via_getattr):
kind = 'method' # depends on [control=['if'], data=[]]
else:
kind = 'data'
result.append((name, kind, homecls, obj)) # depends on [control=['for'], data=['name']]
return result |
def get_jens_unstable(graph: BELGraph) -> Iterable[NodeTriple]:
"""Yield triples of nodes (A, B, C) where ``A -> B``, ``A -| C``, and ``C positiveCorrelation A``.
Calculated efficiently using the Jens Transformation.
"""
r = jens_transformation_alpha(graph)
return get_triangles(r) | def function[get_jens_unstable, parameter[graph]]:
constant[Yield triples of nodes (A, B, C) where ``A -> B``, ``A -| C``, and ``C positiveCorrelation A``.
Calculated efficiently using the Jens Transformation.
]
variable[r] assign[=] call[name[jens_transformation_alpha], parameter[name[graph]]]
return[call[name[get_triangles], parameter[name[r]]]] | keyword[def] identifier[get_jens_unstable] ( identifier[graph] : identifier[BELGraph] )-> identifier[Iterable] [ identifier[NodeTriple] ]:
literal[string]
identifier[r] = identifier[jens_transformation_alpha] ( identifier[graph] )
keyword[return] identifier[get_triangles] ( identifier[r] ) | def get_jens_unstable(graph: BELGraph) -> Iterable[NodeTriple]:
"""Yield triples of nodes (A, B, C) where ``A -> B``, ``A -| C``, and ``C positiveCorrelation A``.
Calculated efficiently using the Jens Transformation.
"""
r = jens_transformation_alpha(graph)
return get_triangles(r) |
def qubo_circuit(
graph: nx.Graph,
steps: int,
beta: Sequence,
gamma: Sequence) -> Circuit:
"""
A QAOA circuit for the Quadratic Unconstrained Binary Optimization
problem (i.e. an Ising model).
Args:
graph : a networkx graph instance with optional edge and node weights
steps : number of QAOA steps
beta : driver parameters (One per step)
gamma : cost parameters (One per step)
"""
qubits = list(graph.nodes())
# Initialization
circ = Circuit()
for q0 in qubits:
circ += H(q0)
# Run for given number of QAOA steps
for p in range(0, steps):
# Cost
for q0, q1 in graph.edges():
weight = graph[q0][q1].get('weight', 1.0)
# Note factor of pi due to parameterization of ZZ gate
circ += ZZ(-weight * gamma[p] / np.pi, q0, q1)
for q0 in qubits:
node_weight = graph.nodes[q0].get('weight', None)
if node_weight is not None:
circ += RZ(node_weight, q0)
# Drive
for q0 in qubits:
circ += RX(beta[p], q0)
return circ | def function[qubo_circuit, parameter[graph, steps, beta, gamma]]:
constant[
A QAOA circuit for the Quadratic Unconstrained Binary Optimization
problem (i.e. an Ising model).
Args:
graph : a networkx graph instance with optional edge and node weights
steps : number of QAOA steps
beta : driver parameters (One per step)
gamma : cost parameters (One per step)
]
variable[qubits] assign[=] call[name[list], parameter[call[name[graph].nodes, parameter[]]]]
variable[circ] assign[=] call[name[Circuit], parameter[]]
for taget[name[q0]] in starred[name[qubits]] begin[:]
<ast.AugAssign object at 0x7da20c6c6860>
for taget[name[p]] in starred[call[name[range], parameter[constant[0], name[steps]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6c6620>, <ast.Name object at 0x7da20c6c5ff0>]]] in starred[call[name[graph].edges, parameter[]]] begin[:]
variable[weight] assign[=] call[call[call[name[graph]][name[q0]]][name[q1]].get, parameter[constant[weight], constant[1.0]]]
<ast.AugAssign object at 0x7da20c6c5570>
for taget[name[q0]] in starred[name[qubits]] begin[:]
variable[node_weight] assign[=] call[call[name[graph].nodes][name[q0]].get, parameter[constant[weight], constant[None]]]
if compare[name[node_weight] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b170e4a0>
for taget[name[q0]] in starred[name[qubits]] begin[:]
<ast.AugAssign object at 0x7da20c6c59f0>
return[name[circ]] | keyword[def] identifier[qubo_circuit] (
identifier[graph] : identifier[nx] . identifier[Graph] ,
identifier[steps] : identifier[int] ,
identifier[beta] : identifier[Sequence] ,
identifier[gamma] : identifier[Sequence] )-> identifier[Circuit] :
literal[string]
identifier[qubits] = identifier[list] ( identifier[graph] . identifier[nodes] ())
identifier[circ] = identifier[Circuit] ()
keyword[for] identifier[q0] keyword[in] identifier[qubits] :
identifier[circ] += identifier[H] ( identifier[q0] )
keyword[for] identifier[p] keyword[in] identifier[range] ( literal[int] , identifier[steps] ):
keyword[for] identifier[q0] , identifier[q1] keyword[in] identifier[graph] . identifier[edges] ():
identifier[weight] = identifier[graph] [ identifier[q0] ][ identifier[q1] ]. identifier[get] ( literal[string] , literal[int] )
identifier[circ] += identifier[ZZ] (- identifier[weight] * identifier[gamma] [ identifier[p] ]/ identifier[np] . identifier[pi] , identifier[q0] , identifier[q1] )
keyword[for] identifier[q0] keyword[in] identifier[qubits] :
identifier[node_weight] = identifier[graph] . identifier[nodes] [ identifier[q0] ]. identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[node_weight] keyword[is] keyword[not] keyword[None] :
identifier[circ] += identifier[RZ] ( identifier[node_weight] , identifier[q0] )
keyword[for] identifier[q0] keyword[in] identifier[qubits] :
identifier[circ] += identifier[RX] ( identifier[beta] [ identifier[p] ], identifier[q0] )
keyword[return] identifier[circ] | def qubo_circuit(graph: nx.Graph, steps: int, beta: Sequence, gamma: Sequence) -> Circuit:
"""
A QAOA circuit for the Quadratic Unconstrained Binary Optimization
problem (i.e. an Ising model).
Args:
graph : a networkx graph instance with optional edge and node weights
steps : number of QAOA steps
beta : driver parameters (One per step)
gamma : cost parameters (One per step)
"""
qubits = list(graph.nodes())
# Initialization
circ = Circuit()
for q0 in qubits:
circ += H(q0) # depends on [control=['for'], data=['q0']]
# Run for given number of QAOA steps
for p in range(0, steps):
# Cost
for (q0, q1) in graph.edges():
weight = graph[q0][q1].get('weight', 1.0)
# Note factor of pi due to parameterization of ZZ gate
circ += ZZ(-weight * gamma[p] / np.pi, q0, q1) # depends on [control=['for'], data=[]]
for q0 in qubits:
node_weight = graph.nodes[q0].get('weight', None)
if node_weight is not None:
circ += RZ(node_weight, q0) # depends on [control=['if'], data=['node_weight']] # depends on [control=['for'], data=['q0']]
# Drive
for q0 in qubits:
circ += RX(beta[p], q0) # depends on [control=['for'], data=['q0']] # depends on [control=['for'], data=['p']]
return circ |
def fuzzy_simplicial_set(
X,
n_neighbors,
random_state,
metric,
metric_kwds={},
knn_indices=None,
knn_dists=None,
angular=False,
set_op_mix_ratio=1.0,
local_connectivity=1.0,
verbose=False,
):
"""Given a set of data X, a neighborhood size, and a measure of distance
compute the fuzzy simplicial set (here represented as a fuzzy graph in
the form of a sparse matrix) associated to the data. This is done by
locally approximating geodesic distance at each point, creating a fuzzy
simplicial set for each such point, and then combining all the local
fuzzy simplicial sets into a global one via a fuzzy union.
Parameters
----------
X: array of shape (n_samples, n_features)
The data to be modelled as a fuzzy simplicial set.
n_neighbors: int
The number of neighbors to use to approximate geodesic distance.
Larger numbers induce more global estimates of the manifold that can
miss finer detail, while smaller values will focus on fine manifold
structure to the detriment of the larger picture.
random_state: numpy RandomState or equivalent
A state capable being used as a numpy random state.
metric: string or function (optional, default 'euclidean')
The metric to use to compute distances in high dimensional space.
If a string is passed it must match a valid predefined metric. If
a general metric is required a function that takes two 1d arrays and
returns a float can be provided. For performance purposes it is
required that this be a numba jit'd function. Valid string metrics
include:
* euclidean (or l2)
* manhattan (or l1)
* cityblock
* braycurtis
* canberra
* chebyshev
* correlation
* cosine
* dice
* hamming
* jaccard
* kulsinski
* mahalanobis
* matching
* minkowski
* rogerstanimoto
* russellrao
* seuclidean
* sokalmichener
* sokalsneath
* sqeuclidean
* yule
* wminkowski
Metrics that take arguments (such as minkowski, mahalanobis etc.)
can have arguments passed via the metric_kwds dictionary. At this
time care must be taken and dictionary elements must be ordered
appropriately; this will hopefully be fixed in the future.
metric_kwds: dict (optional, default {})
Arguments to pass on to the metric, such as the ``p`` value for
Minkowski distance.
knn_indices: array of shape (n_samples, n_neighbors) (optional)
If the k-nearest neighbors of each point has already been calculated
you can pass them in here to save computation time. This should be
an array with the indices of the k-nearest neighbors as a row for
each data point.
knn_dists: array of shape (n_samples, n_neighbors) (optional)
If the k-nearest neighbors of each point has already been calculated
you can pass them in here to save computation time. This should be
an array with the distances of the k-nearest neighbors as a row for
each data point.
angular: bool (optional, default False)
Whether to use angular/cosine distance for the random projection
forest for seeding NN-descent to determine approximate nearest
neighbors.
set_op_mix_ratio: float (optional, default 1.0)
Interpolate between (fuzzy) union and intersection as the set operation
used to combine local fuzzy simplicial sets to obtain a global fuzzy
simplicial sets. Both fuzzy set operations use the product t-norm.
The value of this parameter should be between 0.0 and 1.0; a value of
1.0 will use a pure fuzzy union, while 0.0 will use a pure fuzzy
intersection.
local_connectivity: int (optional, default 1)
The local connectivity required -- i.e. the number of nearest
neighbors that should be assumed to be connected at a local level.
The higher this value the more connected the manifold becomes
locally. In practice this should be not more than the local intrinsic
dimension of the manifold.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
fuzzy_simplicial_set: coo_matrix
A fuzzy simplicial set represented as a sparse matrix. The (i,
j) entry of the matrix represents the membership strength of the
1-simplex between the ith and jth sample points.
"""
if knn_indices is None or knn_dists is None:
knn_indices, knn_dists, _ = nearest_neighbors(
X, n_neighbors, metric, metric_kwds, angular, random_state, verbose=verbose
)
sigmas, rhos = smooth_knn_dist(
knn_dists, n_neighbors, local_connectivity=local_connectivity
)
rows, cols, vals = compute_membership_strengths(
knn_indices, knn_dists, sigmas, rhos
)
result = scipy.sparse.coo_matrix(
(vals, (rows, cols)), shape=(X.shape[0], X.shape[0])
)
result.eliminate_zeros()
transpose = result.transpose()
prod_matrix = result.multiply(transpose)
result = (
set_op_mix_ratio * (result + transpose - prod_matrix)
+ (1.0 - set_op_mix_ratio) * prod_matrix
)
result.eliminate_zeros()
return result | def function[fuzzy_simplicial_set, parameter[X, n_neighbors, random_state, metric, metric_kwds, knn_indices, knn_dists, angular, set_op_mix_ratio, local_connectivity, verbose]]:
constant[Given a set of data X, a neighborhood size, and a measure of distance
compute the fuzzy simplicial set (here represented as a fuzzy graph in
the form of a sparse matrix) associated to the data. This is done by
locally approximating geodesic distance at each point, creating a fuzzy
simplicial set for each such point, and then combining all the local
fuzzy simplicial sets into a global one via a fuzzy union.
Parameters
----------
X: array of shape (n_samples, n_features)
The data to be modelled as a fuzzy simplicial set.
n_neighbors: int
The number of neighbors to use to approximate geodesic distance.
Larger numbers induce more global estimates of the manifold that can
miss finer detail, while smaller values will focus on fine manifold
structure to the detriment of the larger picture.
random_state: numpy RandomState or equivalent
A state capable being used as a numpy random state.
metric: string or function (optional, default 'euclidean')
The metric to use to compute distances in high dimensional space.
If a string is passed it must match a valid predefined metric. If
a general metric is required a function that takes two 1d arrays and
returns a float can be provided. For performance purposes it is
required that this be a numba jit'd function. Valid string metrics
include:
* euclidean (or l2)
* manhattan (or l1)
* cityblock
* braycurtis
* canberra
* chebyshev
* correlation
* cosine
* dice
* hamming
* jaccard
* kulsinski
* mahalanobis
* matching
* minkowski
* rogerstanimoto
* russellrao
* seuclidean
* sokalmichener
* sokalsneath
* sqeuclidean
* yule
* wminkowski
Metrics that take arguments (such as minkowski, mahalanobis etc.)
can have arguments passed via the metric_kwds dictionary. At this
time care must be taken and dictionary elements must be ordered
appropriately; this will hopefully be fixed in the future.
metric_kwds: dict (optional, default {})
Arguments to pass on to the metric, such as the ``p`` value for
Minkowski distance.
knn_indices: array of shape (n_samples, n_neighbors) (optional)
If the k-nearest neighbors of each point has already been calculated
you can pass them in here to save computation time. This should be
an array with the indices of the k-nearest neighbors as a row for
each data point.
knn_dists: array of shape (n_samples, n_neighbors) (optional)
If the k-nearest neighbors of each point has already been calculated
you can pass them in here to save computation time. This should be
an array with the distances of the k-nearest neighbors as a row for
each data point.
angular: bool (optional, default False)
Whether to use angular/cosine distance for the random projection
forest for seeding NN-descent to determine approximate nearest
neighbors.
set_op_mix_ratio: float (optional, default 1.0)
Interpolate between (fuzzy) union and intersection as the set operation
used to combine local fuzzy simplicial sets to obtain a global fuzzy
simplicial sets. Both fuzzy set operations use the product t-norm.
The value of this parameter should be between 0.0 and 1.0; a value of
1.0 will use a pure fuzzy union, while 0.0 will use a pure fuzzy
intersection.
local_connectivity: int (optional, default 1)
The local connectivity required -- i.e. the number of nearest
neighbors that should be assumed to be connected at a local level.
The higher this value the more connected the manifold becomes
locally. In practice this should be not more than the local intrinsic
dimension of the manifold.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
fuzzy_simplicial_set: coo_matrix
A fuzzy simplicial set represented as a sparse matrix. The (i,
j) entry of the matrix represents the membership strength of the
1-simplex between the ith and jth sample points.
]
if <ast.BoolOp object at 0x7da20c993c40> begin[:]
<ast.Tuple object at 0x7da20c9939d0> assign[=] call[name[nearest_neighbors], parameter[name[X], name[n_neighbors], name[metric], name[metric_kwds], name[angular], name[random_state]]]
<ast.Tuple object at 0x7da20c9933d0> assign[=] call[name[smooth_knn_dist], parameter[name[knn_dists], name[n_neighbors]]]
<ast.Tuple object at 0x7da20c991690> assign[=] call[name[compute_membership_strengths], parameter[name[knn_indices], name[knn_dists], name[sigmas], name[rhos]]]
variable[result] assign[=] call[name[scipy].sparse.coo_matrix, parameter[tuple[[<ast.Name object at 0x7da20c991bd0>, <ast.Tuple object at 0x7da20c9926e0>]]]]
call[name[result].eliminate_zeros, parameter[]]
variable[transpose] assign[=] call[name[result].transpose, parameter[]]
variable[prod_matrix] assign[=] call[name[result].multiply, parameter[name[transpose]]]
variable[result] assign[=] binary_operation[binary_operation[name[set_op_mix_ratio] * binary_operation[binary_operation[name[result] + name[transpose]] - name[prod_matrix]]] + binary_operation[binary_operation[constant[1.0] - name[set_op_mix_ratio]] * name[prod_matrix]]]
call[name[result].eliminate_zeros, parameter[]]
return[name[result]] | keyword[def] identifier[fuzzy_simplicial_set] (
identifier[X] ,
identifier[n_neighbors] ,
identifier[random_state] ,
identifier[metric] ,
identifier[metric_kwds] ={},
identifier[knn_indices] = keyword[None] ,
identifier[knn_dists] = keyword[None] ,
identifier[angular] = keyword[False] ,
identifier[set_op_mix_ratio] = literal[int] ,
identifier[local_connectivity] = literal[int] ,
identifier[verbose] = keyword[False] ,
):
literal[string]
keyword[if] identifier[knn_indices] keyword[is] keyword[None] keyword[or] identifier[knn_dists] keyword[is] keyword[None] :
identifier[knn_indices] , identifier[knn_dists] , identifier[_] = identifier[nearest_neighbors] (
identifier[X] , identifier[n_neighbors] , identifier[metric] , identifier[metric_kwds] , identifier[angular] , identifier[random_state] , identifier[verbose] = identifier[verbose]
)
identifier[sigmas] , identifier[rhos] = identifier[smooth_knn_dist] (
identifier[knn_dists] , identifier[n_neighbors] , identifier[local_connectivity] = identifier[local_connectivity]
)
identifier[rows] , identifier[cols] , identifier[vals] = identifier[compute_membership_strengths] (
identifier[knn_indices] , identifier[knn_dists] , identifier[sigmas] , identifier[rhos]
)
identifier[result] = identifier[scipy] . identifier[sparse] . identifier[coo_matrix] (
( identifier[vals] ,( identifier[rows] , identifier[cols] )), identifier[shape] =( identifier[X] . identifier[shape] [ literal[int] ], identifier[X] . identifier[shape] [ literal[int] ])
)
identifier[result] . identifier[eliminate_zeros] ()
identifier[transpose] = identifier[result] . identifier[transpose] ()
identifier[prod_matrix] = identifier[result] . identifier[multiply] ( identifier[transpose] )
identifier[result] =(
identifier[set_op_mix_ratio] *( identifier[result] + identifier[transpose] - identifier[prod_matrix] )
+( literal[int] - identifier[set_op_mix_ratio] )* identifier[prod_matrix]
)
identifier[result] . identifier[eliminate_zeros] ()
keyword[return] identifier[result] | def fuzzy_simplicial_set(X, n_neighbors, random_state, metric, metric_kwds={}, knn_indices=None, knn_dists=None, angular=False, set_op_mix_ratio=1.0, local_connectivity=1.0, verbose=False):
"""Given a set of data X, a neighborhood size, and a measure of distance
compute the fuzzy simplicial set (here represented as a fuzzy graph in
the form of a sparse matrix) associated to the data. This is done by
locally approximating geodesic distance at each point, creating a fuzzy
simplicial set for each such point, and then combining all the local
fuzzy simplicial sets into a global one via a fuzzy union.
Parameters
----------
X: array of shape (n_samples, n_features)
The data to be modelled as a fuzzy simplicial set.
n_neighbors: int
The number of neighbors to use to approximate geodesic distance.
Larger numbers induce more global estimates of the manifold that can
miss finer detail, while smaller values will focus on fine manifold
structure to the detriment of the larger picture.
random_state: numpy RandomState or equivalent
A state capable being used as a numpy random state.
metric: string or function (optional, default 'euclidean')
The metric to use to compute distances in high dimensional space.
If a string is passed it must match a valid predefined metric. If
a general metric is required a function that takes two 1d arrays and
returns a float can be provided. For performance purposes it is
required that this be a numba jit'd function. Valid string metrics
include:
* euclidean (or l2)
* manhattan (or l1)
* cityblock
* braycurtis
* canberra
* chebyshev
* correlation
* cosine
* dice
* hamming
* jaccard
* kulsinski
* mahalanobis
* matching
* minkowski
* rogerstanimoto
* russellrao
* seuclidean
* sokalmichener
* sokalsneath
* sqeuclidean
* yule
* wminkowski
Metrics that take arguments (such as minkowski, mahalanobis etc.)
can have arguments passed via the metric_kwds dictionary. At this
time care must be taken and dictionary elements must be ordered
appropriately; this will hopefully be fixed in the future.
metric_kwds: dict (optional, default {})
Arguments to pass on to the metric, such as the ``p`` value for
Minkowski distance.
knn_indices: array of shape (n_samples, n_neighbors) (optional)
If the k-nearest neighbors of each point has already been calculated
you can pass them in here to save computation time. This should be
an array with the indices of the k-nearest neighbors as a row for
each data point.
knn_dists: array of shape (n_samples, n_neighbors) (optional)
If the k-nearest neighbors of each point has already been calculated
you can pass them in here to save computation time. This should be
an array with the distances of the k-nearest neighbors as a row for
each data point.
angular: bool (optional, default False)
Whether to use angular/cosine distance for the random projection
forest for seeding NN-descent to determine approximate nearest
neighbors.
set_op_mix_ratio: float (optional, default 1.0)
Interpolate between (fuzzy) union and intersection as the set operation
used to combine local fuzzy simplicial sets to obtain a global fuzzy
simplicial sets. Both fuzzy set operations use the product t-norm.
The value of this parameter should be between 0.0 and 1.0; a value of
1.0 will use a pure fuzzy union, while 0.0 will use a pure fuzzy
intersection.
local_connectivity: int (optional, default 1)
The local connectivity required -- i.e. the number of nearest
neighbors that should be assumed to be connected at a local level.
The higher this value the more connected the manifold becomes
locally. In practice this should be not more than the local intrinsic
dimension of the manifold.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
fuzzy_simplicial_set: coo_matrix
A fuzzy simplicial set represented as a sparse matrix. The (i,
j) entry of the matrix represents the membership strength of the
1-simplex between the ith and jth sample points.
"""
if knn_indices is None or knn_dists is None:
(knn_indices, knn_dists, _) = nearest_neighbors(X, n_neighbors, metric, metric_kwds, angular, random_state, verbose=verbose) # depends on [control=['if'], data=[]]
(sigmas, rhos) = smooth_knn_dist(knn_dists, n_neighbors, local_connectivity=local_connectivity)
(rows, cols, vals) = compute_membership_strengths(knn_indices, knn_dists, sigmas, rhos)
result = scipy.sparse.coo_matrix((vals, (rows, cols)), shape=(X.shape[0], X.shape[0]))
result.eliminate_zeros()
transpose = result.transpose()
prod_matrix = result.multiply(transpose)
result = set_op_mix_ratio * (result + transpose - prod_matrix) + (1.0 - set_op_mix_ratio) * prod_matrix
result.eliminate_zeros()
return result |
def _sendKey(self, keychr, modFlags=0, globally=False):
"""Send one character with no modifiers.
Parameters: key character or constant referring to a non-alpha-numeric
key (e.g. RETURN or TAB)
modifier flags,
global or app specific
Returns: None or raise ValueError exception
"""
escapedChrs = {
'\n': AXKeyCodeConstants.RETURN,
'\r': AXKeyCodeConstants.RETURN,
'\t': AXKeyCodeConstants.TAB,
}
if keychr in escapedChrs:
keychr = escapedChrs[keychr]
self._addKeyToQueue(keychr, modFlags, globally=globally)
self._postQueuedEvents() | def function[_sendKey, parameter[self, keychr, modFlags, globally]]:
constant[Send one character with no modifiers.
Parameters: key character or constant referring to a non-alpha-numeric
key (e.g. RETURN or TAB)
modifier flags,
global or app specific
Returns: None or raise ValueError exception
]
variable[escapedChrs] assign[=] dictionary[[<ast.Constant object at 0x7da18f810370>, <ast.Constant object at 0x7da18f810d00>, <ast.Constant object at 0x7da18f813880>], [<ast.Attribute object at 0x7da18f813ee0>, <ast.Attribute object at 0x7da18f8115d0>, <ast.Attribute object at 0x7da18f810ee0>]]
if compare[name[keychr] in name[escapedChrs]] begin[:]
variable[keychr] assign[=] call[name[escapedChrs]][name[keychr]]
call[name[self]._addKeyToQueue, parameter[name[keychr], name[modFlags]]]
call[name[self]._postQueuedEvents, parameter[]] | keyword[def] identifier[_sendKey] ( identifier[self] , identifier[keychr] , identifier[modFlags] = literal[int] , identifier[globally] = keyword[False] ):
literal[string]
identifier[escapedChrs] ={
literal[string] : identifier[AXKeyCodeConstants] . identifier[RETURN] ,
literal[string] : identifier[AXKeyCodeConstants] . identifier[RETURN] ,
literal[string] : identifier[AXKeyCodeConstants] . identifier[TAB] ,
}
keyword[if] identifier[keychr] keyword[in] identifier[escapedChrs] :
identifier[keychr] = identifier[escapedChrs] [ identifier[keychr] ]
identifier[self] . identifier[_addKeyToQueue] ( identifier[keychr] , identifier[modFlags] , identifier[globally] = identifier[globally] )
identifier[self] . identifier[_postQueuedEvents] () | def _sendKey(self, keychr, modFlags=0, globally=False):
"""Send one character with no modifiers.
Parameters: key character or constant referring to a non-alpha-numeric
key (e.g. RETURN or TAB)
modifier flags,
global or app specific
Returns: None or raise ValueError exception
"""
escapedChrs = {'\n': AXKeyCodeConstants.RETURN, '\r': AXKeyCodeConstants.RETURN, '\t': AXKeyCodeConstants.TAB}
if keychr in escapedChrs:
keychr = escapedChrs[keychr] # depends on [control=['if'], data=['keychr', 'escapedChrs']]
self._addKeyToQueue(keychr, modFlags, globally=globally)
self._postQueuedEvents() |
def OnReplaceAll(self, event):
"""Called when a replace all operation is started"""
find_string = event.GetFindString()
flags = self._wxflag2flag(event.GetFlags())
replace_string = event.GetReplaceString()
findpositions = self.grid.actions.find_all(find_string, flags)
with undo.group(_("Replace all")):
self.grid.actions.replace_all(findpositions, find_string,
replace_string)
event.Skip() | def function[OnReplaceAll, parameter[self, event]]:
constant[Called when a replace all operation is started]
variable[find_string] assign[=] call[name[event].GetFindString, parameter[]]
variable[flags] assign[=] call[name[self]._wxflag2flag, parameter[call[name[event].GetFlags, parameter[]]]]
variable[replace_string] assign[=] call[name[event].GetReplaceString, parameter[]]
variable[findpositions] assign[=] call[name[self].grid.actions.find_all, parameter[name[find_string], name[flags]]]
with call[name[undo].group, parameter[call[name[_], parameter[constant[Replace all]]]]] begin[:]
call[name[self].grid.actions.replace_all, parameter[name[findpositions], name[find_string], name[replace_string]]]
call[name[event].Skip, parameter[]] | keyword[def] identifier[OnReplaceAll] ( identifier[self] , identifier[event] ):
literal[string]
identifier[find_string] = identifier[event] . identifier[GetFindString] ()
identifier[flags] = identifier[self] . identifier[_wxflag2flag] ( identifier[event] . identifier[GetFlags] ())
identifier[replace_string] = identifier[event] . identifier[GetReplaceString] ()
identifier[findpositions] = identifier[self] . identifier[grid] . identifier[actions] . identifier[find_all] ( identifier[find_string] , identifier[flags] )
keyword[with] identifier[undo] . identifier[group] ( identifier[_] ( literal[string] )):
identifier[self] . identifier[grid] . identifier[actions] . identifier[replace_all] ( identifier[findpositions] , identifier[find_string] ,
identifier[replace_string] )
identifier[event] . identifier[Skip] () | def OnReplaceAll(self, event):
"""Called when a replace all operation is started"""
find_string = event.GetFindString()
flags = self._wxflag2flag(event.GetFlags())
replace_string = event.GetReplaceString()
findpositions = self.grid.actions.find_all(find_string, flags)
with undo.group(_('Replace all')):
self.grid.actions.replace_all(findpositions, find_string, replace_string) # depends on [control=['with'], data=[]]
event.Skip() |
def get_queryset(self):
'''We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.
We also have the query params permission_contains and object_id, which
allow users to filter the teams based on the permissions they
contain.'''
queryset = super(BaseTeamViewSet, self).get_queryset()
if self.action == 'list':
archived = get_true_false_both(
self.request.query_params, 'archived', 'false')
if archived == 'true':
queryset = queryset.filter(archived=True)
elif archived == 'false':
queryset = queryset.filter(archived=False)
permission = self.request.query_params.get(
'permission_contains', None)
if permission is not None:
queryset = queryset.filter(
permissions__type__contains=permission).distinct()
object_id = self.request.query_params.get('object_id', None)
if object_id is not None:
queryset = queryset.filter(
permissions__object_id=object_id).distinct()
namespace = self.request.query_params.get('namespace', None)
if namespace is not None:
queryset = queryset.filter(
permissions__namespace=namespace).distinct()
permission = permissions.TeamPermission()
queryset = [
team for team in queryset if
permission.has_object_permission(self.request, self, team)]
return queryset | def function[get_queryset, parameter[self]]:
constant[We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.
We also have the query params permission_contains and object_id, which
allow users to filter the teams based on the permissions they
contain.]
variable[queryset] assign[=] call[call[name[super], parameter[name[BaseTeamViewSet], name[self]]].get_queryset, parameter[]]
if compare[name[self].action equal[==] constant[list]] begin[:]
variable[archived] assign[=] call[name[get_true_false_both], parameter[name[self].request.query_params, constant[archived], constant[false]]]
if compare[name[archived] equal[==] constant[true]] begin[:]
variable[queryset] assign[=] call[name[queryset].filter, parameter[]]
variable[permission] assign[=] call[name[self].request.query_params.get, parameter[constant[permission_contains], constant[None]]]
if compare[name[permission] is_not constant[None]] begin[:]
variable[queryset] assign[=] call[call[name[queryset].filter, parameter[]].distinct, parameter[]]
variable[object_id] assign[=] call[name[self].request.query_params.get, parameter[constant[object_id], constant[None]]]
if compare[name[object_id] is_not constant[None]] begin[:]
variable[queryset] assign[=] call[call[name[queryset].filter, parameter[]].distinct, parameter[]]
variable[namespace] assign[=] call[name[self].request.query_params.get, parameter[constant[namespace], constant[None]]]
if compare[name[namespace] is_not constant[None]] begin[:]
variable[queryset] assign[=] call[call[name[queryset].filter, parameter[]].distinct, parameter[]]
variable[permission] assign[=] call[name[permissions].TeamPermission, parameter[]]
variable[queryset] assign[=] <ast.ListComp object at 0x7da18bcc8f70>
return[name[queryset]] | keyword[def] identifier[get_queryset] ( identifier[self] ):
literal[string]
identifier[queryset] = identifier[super] ( identifier[BaseTeamViewSet] , identifier[self] ). identifier[get_queryset] ()
keyword[if] identifier[self] . identifier[action] == literal[string] :
identifier[archived] = identifier[get_true_false_both] (
identifier[self] . identifier[request] . identifier[query_params] , literal[string] , literal[string] )
keyword[if] identifier[archived] == literal[string] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[archived] = keyword[True] )
keyword[elif] identifier[archived] == literal[string] :
identifier[queryset] = identifier[queryset] . identifier[filter] ( identifier[archived] = keyword[False] )
identifier[permission] = identifier[self] . identifier[request] . identifier[query_params] . identifier[get] (
literal[string] , keyword[None] )
keyword[if] identifier[permission] keyword[is] keyword[not] keyword[None] :
identifier[queryset] = identifier[queryset] . identifier[filter] (
identifier[permissions__type__contains] = identifier[permission] ). identifier[distinct] ()
identifier[object_id] = identifier[self] . identifier[request] . identifier[query_params] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[object_id] keyword[is] keyword[not] keyword[None] :
identifier[queryset] = identifier[queryset] . identifier[filter] (
identifier[permissions__object_id] = identifier[object_id] ). identifier[distinct] ()
identifier[namespace] = identifier[self] . identifier[request] . identifier[query_params] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[namespace] keyword[is] keyword[not] keyword[None] :
identifier[queryset] = identifier[queryset] . identifier[filter] (
identifier[permissions__namespace] = identifier[namespace] ). identifier[distinct] ()
identifier[permission] = identifier[permissions] . identifier[TeamPermission] ()
identifier[queryset] =[
identifier[team] keyword[for] identifier[team] keyword[in] identifier[queryset] keyword[if]
identifier[permission] . identifier[has_object_permission] ( identifier[self] . identifier[request] , identifier[self] , identifier[team] )]
keyword[return] identifier[queryset] | def get_queryset(self):
"""We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.
We also have the query params permission_contains and object_id, which
allow users to filter the teams based on the permissions they
contain."""
queryset = super(BaseTeamViewSet, self).get_queryset()
if self.action == 'list':
archived = get_true_false_both(self.request.query_params, 'archived', 'false')
if archived == 'true':
queryset = queryset.filter(archived=True) # depends on [control=['if'], data=[]]
elif archived == 'false':
queryset = queryset.filter(archived=False) # depends on [control=['if'], data=[]]
permission = self.request.query_params.get('permission_contains', None)
if permission is not None:
queryset = queryset.filter(permissions__type__contains=permission).distinct() # depends on [control=['if'], data=['permission']]
object_id = self.request.query_params.get('object_id', None)
if object_id is not None:
queryset = queryset.filter(permissions__object_id=object_id).distinct() # depends on [control=['if'], data=['object_id']]
namespace = self.request.query_params.get('namespace', None)
if namespace is not None:
queryset = queryset.filter(permissions__namespace=namespace).distinct() # depends on [control=['if'], data=['namespace']]
permission = permissions.TeamPermission()
queryset = [team for team in queryset if permission.has_object_permission(self.request, self, team)] # depends on [control=['if'], data=[]]
return queryset |
def mat2quat(rmat, precise=False):
"""
Converts given rotation matrix to quaternion.
Args:
rmat: 3x3 rotation matrix
precise: If isprecise is True, the input matrix is assumed to be a precise
rotation matrix and a faster algorithm is used.
Returns:
vec4 float quaternion angles
"""
M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]
if precise:
q = np.empty((4,))
t = np.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 0, 1, 2
if M[1, 1] > M[0, 0]:
i, j, k = 1, 2, 0
if M[2, 2] > M[i, i]:
i, j, k = 2, 0, 1
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q = q[[3, 0, 1, 2]]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = np.array(
[
[m00 - m11 - m22, 0.0, 0.0, 0.0],
[m01 + m10, m11 - m00 - m22, 0.0, 0.0],
[m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
[m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],
]
)
K /= 3.0
# quaternion is Eigen vector of K that corresponds to largest eigenvalue
w, V = np.linalg.eigh(K)
q = V[[3, 0, 1, 2], np.argmax(w)]
if q[0] < 0.0:
np.negative(q, q)
return q[[1, 2, 3, 0]] | def function[mat2quat, parameter[rmat, precise]]:
constant[
Converts given rotation matrix to quaternion.
Args:
rmat: 3x3 rotation matrix
precise: If isprecise is True, the input matrix is assumed to be a precise
rotation matrix and a faster algorithm is used.
Returns:
vec4 float quaternion angles
]
variable[M] assign[=] call[call[name[np].array, parameter[name[rmat]]]][tuple[[<ast.Slice object at 0x7da2044c33a0>, <ast.Slice object at 0x7da2044c0f70>]]]
if name[precise] begin[:]
variable[q] assign[=] call[name[np].empty, parameter[tuple[[<ast.Constant object at 0x7da2044c0280>]]]]
variable[t] assign[=] call[name[np].trace, parameter[name[M]]]
if compare[name[t] greater[>] call[name[M]][tuple[[<ast.Constant object at 0x7da2044c2320>, <ast.Constant object at 0x7da2044c0a00>]]]] begin[:]
call[name[q]][constant[0]] assign[=] name[t]
call[name[q]][constant[3]] assign[=] binary_operation[call[name[M]][tuple[[<ast.Constant object at 0x7da2044c0100>, <ast.Constant object at 0x7da2044c1270>]]] - call[name[M]][tuple[[<ast.Constant object at 0x7da2044c3580>, <ast.Constant object at 0x7da2044c0910>]]]]
call[name[q]][constant[2]] assign[=] binary_operation[call[name[M]][tuple[[<ast.Constant object at 0x7da2044c38b0>, <ast.Constant object at 0x7da2044c1b70>]]] - call[name[M]][tuple[[<ast.Constant object at 0x7da2044c3730>, <ast.Constant object at 0x7da2044c3ee0>]]]]
call[name[q]][constant[1]] assign[=] binary_operation[call[name[M]][tuple[[<ast.Constant object at 0x7da2044c3010>, <ast.Constant object at 0x7da2044c13c0>]]] - call[name[M]][tuple[[<ast.Constant object at 0x7da2044c3250>, <ast.Constant object at 0x7da2044c3b50>]]]]
<ast.AugAssign object at 0x7da20c6c5e10>
if compare[call[name[q]][constant[0]] less[<] constant[0.0]] begin[:]
call[name[np].negative, parameter[name[q], name[q]]]
return[call[name[q]][list[[<ast.Constant object at 0x7da18f58cb50>, <ast.Constant object at 0x7da18f58ed70>, <ast.Constant object at 0x7da18f58cdf0>, <ast.Constant object at 0x7da18f58d600>]]]] | keyword[def] identifier[mat2quat] ( identifier[rmat] , identifier[precise] = keyword[False] ):
literal[string]
identifier[M] = identifier[np] . identifier[array] ( identifier[rmat] , identifier[dtype] = identifier[np] . identifier[float32] , identifier[copy] = keyword[False] )[: literal[int] ,: literal[int] ]
keyword[if] identifier[precise] :
identifier[q] = identifier[np] . identifier[empty] (( literal[int] ,))
identifier[t] = identifier[np] . identifier[trace] ( identifier[M] )
keyword[if] identifier[t] > identifier[M] [ literal[int] , literal[int] ]:
identifier[q] [ literal[int] ]= identifier[t]
identifier[q] [ literal[int] ]= identifier[M] [ literal[int] , literal[int] ]- identifier[M] [ literal[int] , literal[int] ]
identifier[q] [ literal[int] ]= identifier[M] [ literal[int] , literal[int] ]- identifier[M] [ literal[int] , literal[int] ]
identifier[q] [ literal[int] ]= identifier[M] [ literal[int] , literal[int] ]- identifier[M] [ literal[int] , literal[int] ]
keyword[else] :
identifier[i] , identifier[j] , identifier[k] = literal[int] , literal[int] , literal[int]
keyword[if] identifier[M] [ literal[int] , literal[int] ]> identifier[M] [ literal[int] , literal[int] ]:
identifier[i] , identifier[j] , identifier[k] = literal[int] , literal[int] , literal[int]
keyword[if] identifier[M] [ literal[int] , literal[int] ]> identifier[M] [ identifier[i] , identifier[i] ]:
identifier[i] , identifier[j] , identifier[k] = literal[int] , literal[int] , literal[int]
identifier[t] = identifier[M] [ identifier[i] , identifier[i] ]-( identifier[M] [ identifier[j] , identifier[j] ]+ identifier[M] [ identifier[k] , identifier[k] ])+ identifier[M] [ literal[int] , literal[int] ]
identifier[q] [ identifier[i] ]= identifier[t]
identifier[q] [ identifier[j] ]= identifier[M] [ identifier[i] , identifier[j] ]+ identifier[M] [ identifier[j] , identifier[i] ]
identifier[q] [ identifier[k] ]= identifier[M] [ identifier[k] , identifier[i] ]+ identifier[M] [ identifier[i] , identifier[k] ]
identifier[q] [ literal[int] ]= identifier[M] [ identifier[k] , identifier[j] ]- identifier[M] [ identifier[j] , identifier[k] ]
identifier[q] = identifier[q] [[ literal[int] , literal[int] , literal[int] , literal[int] ]]
identifier[q] *= literal[int] / identifier[math] . identifier[sqrt] ( identifier[t] * identifier[M] [ literal[int] , literal[int] ])
keyword[else] :
identifier[m00] = identifier[M] [ literal[int] , literal[int] ]
identifier[m01] = identifier[M] [ literal[int] , literal[int] ]
identifier[m02] = identifier[M] [ literal[int] , literal[int] ]
identifier[m10] = identifier[M] [ literal[int] , literal[int] ]
identifier[m11] = identifier[M] [ literal[int] , literal[int] ]
identifier[m12] = identifier[M] [ literal[int] , literal[int] ]
identifier[m20] = identifier[M] [ literal[int] , literal[int] ]
identifier[m21] = identifier[M] [ literal[int] , literal[int] ]
identifier[m22] = identifier[M] [ literal[int] , literal[int] ]
identifier[K] = identifier[np] . identifier[array] (
[
[ identifier[m00] - identifier[m11] - identifier[m22] , literal[int] , literal[int] , literal[int] ],
[ identifier[m01] + identifier[m10] , identifier[m11] - identifier[m00] - identifier[m22] , literal[int] , literal[int] ],
[ identifier[m02] + identifier[m20] , identifier[m12] + identifier[m21] , identifier[m22] - identifier[m00] - identifier[m11] , literal[int] ],
[ identifier[m21] - identifier[m12] , identifier[m02] - identifier[m20] , identifier[m10] - identifier[m01] , identifier[m00] + identifier[m11] + identifier[m22] ],
]
)
identifier[K] /= literal[int]
identifier[w] , identifier[V] = identifier[np] . identifier[linalg] . identifier[eigh] ( identifier[K] )
identifier[q] = identifier[V] [[ literal[int] , literal[int] , literal[int] , literal[int] ], identifier[np] . identifier[argmax] ( identifier[w] )]
keyword[if] identifier[q] [ literal[int] ]< literal[int] :
identifier[np] . identifier[negative] ( identifier[q] , identifier[q] )
keyword[return] identifier[q] [[ literal[int] , literal[int] , literal[int] , literal[int] ]] | def mat2quat(rmat, precise=False):
"""
Converts given rotation matrix to quaternion.
Args:
rmat: 3x3 rotation matrix
precise: If isprecise is True, the input matrix is assumed to be a precise
rotation matrix and a faster algorithm is used.
Returns:
vec4 float quaternion angles
"""
M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]
if precise:
q = np.empty((4,))
t = np.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2] # depends on [control=['if'], data=['t']]
else:
(i, j, k) = (0, 1, 2)
if M[1, 1] > M[0, 0]:
(i, j, k) = (1, 2, 0) # depends on [control=['if'], data=[]]
if M[2, 2] > M[i, i]:
(i, j, k) = (2, 0, 1) # depends on [control=['if'], data=[]]
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q = q[[3, 0, 1, 2]]
q *= 0.5 / math.sqrt(t * M[3, 3]) # depends on [control=['if'], data=[]]
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0], [m01 + m10, m11 - m00 - m22, 0.0, 0.0], [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0], [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
K /= 3.0
# quaternion is Eigen vector of K that corresponds to largest eigenvalue
(w, V) = np.linalg.eigh(K)
q = V[[3, 0, 1, 2], np.argmax(w)]
if q[0] < 0.0:
np.negative(q, q) # depends on [control=['if'], data=[]]
return q[[1, 2, 3, 0]] |
def find_method(func):
"""
Decorator that manages smart defaults or transforms for common find methods:
- fields/projection: list of fields to be returned. Contrary to pymongo, _id won't be added automatically
- json: performs a json_clone on the results. Beware of performance!
- timeout
- return_document
"""
def wrapped(*args, **kwargs):
# Normalize the fields argument if passed as a positional param.
if len(args) == 3 and func.__name__ in ("find", "find_one", "find_by_id", "find_by_ids"):
_param_fields(kwargs, args[2])
args = (args[0], args[1])
elif "fields" in kwargs:
_param_fields(kwargs, kwargs["fields"])
del kwargs["fields"]
elif "projection" in kwargs:
_param_fields(kwargs, kwargs["projection"])
if "timeout" in kwargs:
kwargs["no_cursor_timeout"] = not bool(kwargs["timeout"])
del kwargs["timeout"]
if "spec" in kwargs:
kwargs["filter"] = kwargs["spec"]
del kwargs["spec"]
if kwargs.get("return_document") == "after":
kwargs["return_document"] = ReturnDocument.AFTER
elif kwargs.get("return_document") == "before":
kwargs["return_document"] = ReturnDocument.BEFORE
ret = func(*args, **kwargs)
if kwargs.get("json"):
ret = json_clone(ret)
return ret
return wrapped | def function[find_method, parameter[func]]:
constant[
Decorator that manages smart defaults or transforms for common find methods:
- fields/projection: list of fields to be returned. Contrary to pymongo, _id won't be added automatically
- json: performs a json_clone on the results. Beware of performance!
- timeout
- return_document
]
def function[wrapped, parameter[]]:
if <ast.BoolOp object at 0x7da20c6c4d90> begin[:]
call[name[_param_fields], parameter[name[kwargs], call[name[args]][constant[2]]]]
variable[args] assign[=] tuple[[<ast.Subscript object at 0x7da1b265fd00>, <ast.Subscript object at 0x7da1b265e050>]]
if compare[constant[timeout] in name[kwargs]] begin[:]
call[name[kwargs]][constant[no_cursor_timeout]] assign[=] <ast.UnaryOp object at 0x7da1b265f280>
<ast.Delete object at 0x7da1b265f550>
if compare[constant[spec] in name[kwargs]] begin[:]
call[name[kwargs]][constant[filter]] assign[=] call[name[kwargs]][constant[spec]]
<ast.Delete object at 0x7da1b26488b0>
if compare[call[name[kwargs].get, parameter[constant[return_document]]] equal[==] constant[after]] begin[:]
call[name[kwargs]][constant[return_document]] assign[=] name[ReturnDocument].AFTER
variable[ret] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1b2649e10>]]
if call[name[kwargs].get, parameter[constant[json]]] begin[:]
variable[ret] assign[=] call[name[json_clone], parameter[name[ret]]]
return[name[ret]]
return[name[wrapped]] | keyword[def] identifier[find_method] ( identifier[func] ):
literal[string]
keyword[def] identifier[wrapped] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[len] ( identifier[args] )== literal[int] keyword[and] identifier[func] . identifier[__name__] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[_param_fields] ( identifier[kwargs] , identifier[args] [ literal[int] ])
identifier[args] =( identifier[args] [ literal[int] ], identifier[args] [ literal[int] ])
keyword[elif] literal[string] keyword[in] identifier[kwargs] :
identifier[_param_fields] ( identifier[kwargs] , identifier[kwargs] [ literal[string] ])
keyword[del] identifier[kwargs] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[kwargs] :
identifier[_param_fields] ( identifier[kwargs] , identifier[kwargs] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= keyword[not] identifier[bool] ( identifier[kwargs] [ literal[string] ])
keyword[del] identifier[kwargs] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
keyword[del] identifier[kwargs] [ literal[string] ]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] )== literal[string] :
identifier[kwargs] [ literal[string] ]= identifier[ReturnDocument] . identifier[AFTER]
keyword[elif] identifier[kwargs] . identifier[get] ( literal[string] )== literal[string] :
identifier[kwargs] [ literal[string] ]= identifier[ReturnDocument] . identifier[BEFORE]
identifier[ret] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[ret] = identifier[json_clone] ( identifier[ret] )
keyword[return] identifier[ret]
keyword[return] identifier[wrapped] | def find_method(func):
"""
Decorator that manages smart defaults or transforms for common find methods:
- fields/projection: list of fields to be returned. Contrary to pymongo, _id won't be added automatically
- json: performs a json_clone on the results. Beware of performance!
- timeout
- return_document
"""
def wrapped(*args, **kwargs):
# Normalize the fields argument if passed as a positional param.
if len(args) == 3 and func.__name__ in ('find', 'find_one', 'find_by_id', 'find_by_ids'):
_param_fields(kwargs, args[2])
args = (args[0], args[1]) # depends on [control=['if'], data=[]]
elif 'fields' in kwargs:
_param_fields(kwargs, kwargs['fields'])
del kwargs['fields'] # depends on [control=['if'], data=['kwargs']]
elif 'projection' in kwargs:
_param_fields(kwargs, kwargs['projection']) # depends on [control=['if'], data=['kwargs']]
if 'timeout' in kwargs:
kwargs['no_cursor_timeout'] = not bool(kwargs['timeout'])
del kwargs['timeout'] # depends on [control=['if'], data=['kwargs']]
if 'spec' in kwargs:
kwargs['filter'] = kwargs['spec']
del kwargs['spec'] # depends on [control=['if'], data=['kwargs']]
if kwargs.get('return_document') == 'after':
kwargs['return_document'] = ReturnDocument.AFTER # depends on [control=['if'], data=[]]
elif kwargs.get('return_document') == 'before':
kwargs['return_document'] = ReturnDocument.BEFORE # depends on [control=['if'], data=[]]
ret = func(*args, **kwargs)
if kwargs.get('json'):
ret = json_clone(ret) # depends on [control=['if'], data=[]]
return ret
return wrapped |
def twostep_count_matrix(dtrajs, lag, N):
"""
Compute all two-step count matrices from discrete trajectories.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
the lag time for count matrix estimation
N : int
the number of states in the discrete trajectories.
Returns
-------
C2t : sparse csc-matrix (N, N, N)
two-step count matrices for all states. C2t[:, n, :] is a count matrix for each n
"""
# List all transition triples:
rows = []
cols = []
states = []
for dtraj in dtrajs:
if dtraj.size > 2*lag:
rows.append(dtraj[0:-2*lag])
states.append(dtraj[lag:-lag])
cols.append(dtraj[2*lag:])
row = np.concatenate(rows)
col = np.concatenate(cols)
state = np.concatenate(states)
data = np.ones(row.size)
# Transform the rows and cols into a single list with N*+2 possible values:
pair = N * row + col
# Estimate sparse matrix:
C2t = scipy.sparse.coo_matrix((data, (pair, state)), shape=(N*N, N))
return C2t.tocsc() | def function[twostep_count_matrix, parameter[dtrajs, lag, N]]:
constant[
Compute all two-step count matrices from discrete trajectories.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
the lag time for count matrix estimation
N : int
the number of states in the discrete trajectories.
Returns
-------
C2t : sparse csc-matrix (N, N, N)
two-step count matrices for all states. C2t[:, n, :] is a count matrix for each n
]
variable[rows] assign[=] list[[]]
variable[cols] assign[=] list[[]]
variable[states] assign[=] list[[]]
for taget[name[dtraj]] in starred[name[dtrajs]] begin[:]
if compare[name[dtraj].size greater[>] binary_operation[constant[2] * name[lag]]] begin[:]
call[name[rows].append, parameter[call[name[dtraj]][<ast.Slice object at 0x7da18f00e920>]]]
call[name[states].append, parameter[call[name[dtraj]][<ast.Slice object at 0x7da18f00e020>]]]
call[name[cols].append, parameter[call[name[dtraj]][<ast.Slice object at 0x7da18f00d330>]]]
variable[row] assign[=] call[name[np].concatenate, parameter[name[rows]]]
variable[col] assign[=] call[name[np].concatenate, parameter[name[cols]]]
variable[state] assign[=] call[name[np].concatenate, parameter[name[states]]]
variable[data] assign[=] call[name[np].ones, parameter[name[row].size]]
variable[pair] assign[=] binary_operation[binary_operation[name[N] * name[row]] + name[col]]
variable[C2t] assign[=] call[name[scipy].sparse.coo_matrix, parameter[tuple[[<ast.Name object at 0x7da18f721d20>, <ast.Tuple object at 0x7da18f722e00>]]]]
return[call[name[C2t].tocsc, parameter[]]] | keyword[def] identifier[twostep_count_matrix] ( identifier[dtrajs] , identifier[lag] , identifier[N] ):
literal[string]
identifier[rows] =[]
identifier[cols] =[]
identifier[states] =[]
keyword[for] identifier[dtraj] keyword[in] identifier[dtrajs] :
keyword[if] identifier[dtraj] . identifier[size] > literal[int] * identifier[lag] :
identifier[rows] . identifier[append] ( identifier[dtraj] [ literal[int] :- literal[int] * identifier[lag] ])
identifier[states] . identifier[append] ( identifier[dtraj] [ identifier[lag] :- identifier[lag] ])
identifier[cols] . identifier[append] ( identifier[dtraj] [ literal[int] * identifier[lag] :])
identifier[row] = identifier[np] . identifier[concatenate] ( identifier[rows] )
identifier[col] = identifier[np] . identifier[concatenate] ( identifier[cols] )
identifier[state] = identifier[np] . identifier[concatenate] ( identifier[states] )
identifier[data] = identifier[np] . identifier[ones] ( identifier[row] . identifier[size] )
identifier[pair] = identifier[N] * identifier[row] + identifier[col]
identifier[C2t] = identifier[scipy] . identifier[sparse] . identifier[coo_matrix] (( identifier[data] ,( identifier[pair] , identifier[state] )), identifier[shape] =( identifier[N] * identifier[N] , identifier[N] ))
keyword[return] identifier[C2t] . identifier[tocsc] () | def twostep_count_matrix(dtrajs, lag, N):
"""
Compute all two-step count matrices from discrete trajectories.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
the lag time for count matrix estimation
N : int
the number of states in the discrete trajectories.
Returns
-------
C2t : sparse csc-matrix (N, N, N)
two-step count matrices for all states. C2t[:, n, :] is a count matrix for each n
"""
# List all transition triples:
rows = []
cols = []
states = []
for dtraj in dtrajs:
if dtraj.size > 2 * lag:
rows.append(dtraj[0:-2 * lag])
states.append(dtraj[lag:-lag])
cols.append(dtraj[2 * lag:]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dtraj']]
row = np.concatenate(rows)
col = np.concatenate(cols)
state = np.concatenate(states)
data = np.ones(row.size)
# Transform the rows and cols into a single list with N*+2 possible values:
pair = N * row + col
# Estimate sparse matrix:
C2t = scipy.sparse.coo_matrix((data, (pair, state)), shape=(N * N, N))
return C2t.tocsc() |
def put_comments(self, resource, comment, timeout=None):
""" Post a comment on a file or URL.
The initial idea of VirusTotal Community was that users should be able to make comments on files and URLs,
the comments may be malware analyses, false positive flags, disinfection instructions, etc.
Imagine you have some automatic setup that can produce interesting results related to a given sample or URL
that you submit to VirusTotal for antivirus characterization, you might want to give visibility to your setup
by automatically reviewing samples and URLs with the output of your automation.
:param resource: either a md5/sha1/sha256 hash of the file you want to review or the URL itself that you want
to comment on.
:param comment: the actual review, you can tag it using the "#" twitter-like syntax (e.g. #disinfection #zbot)
and reference users using the "@" syntax (e.g. @VirusTotalTeam).
:param timeout: The amount of time in seconds the request should wait before timing out.
:return: If the comment was successfully posted the response code will be 1, 0 otherwise.
"""
params = {'apikey': self.api_key, 'resource': resource, 'comment': comment}
try:
response = requests.post(self.base + 'comments/put', params=params, proxies=self.proxies, timeout=timeout)
except requests.RequestException as e:
return dict(error=str(e))
return _return_response_and_status_code(response) | def function[put_comments, parameter[self, resource, comment, timeout]]:
constant[ Post a comment on a file or URL.
The initial idea of VirusTotal Community was that users should be able to make comments on files and URLs,
the comments may be malware analyses, false positive flags, disinfection instructions, etc.
Imagine you have some automatic setup that can produce interesting results related to a given sample or URL
that you submit to VirusTotal for antivirus characterization, you might want to give visibility to your setup
by automatically reviewing samples and URLs with the output of your automation.
:param resource: either a md5/sha1/sha256 hash of the file you want to review or the URL itself that you want
to comment on.
:param comment: the actual review, you can tag it using the "#" twitter-like syntax (e.g. #disinfection #zbot)
and reference users using the "@" syntax (e.g. @VirusTotalTeam).
:param timeout: The amount of time in seconds the request should wait before timing out.
:return: If the comment was successfully posted the response code will be 1, 0 otherwise.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ef1480>, <ast.Constant object at 0x7da1b0ef1ab0>, <ast.Constant object at 0x7da1b0ef2680>], [<ast.Attribute object at 0x7da1b0ef2080>, <ast.Name object at 0x7da1b0ef2a40>, <ast.Name object at 0x7da1b0ef2860>]]
<ast.Try object at 0x7da1b0ef2920>
return[call[name[_return_response_and_status_code], parameter[name[response]]]] | keyword[def] identifier[put_comments] ( identifier[self] , identifier[resource] , identifier[comment] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[self] . identifier[api_key] , literal[string] : identifier[resource] , literal[string] : identifier[comment] }
keyword[try] :
identifier[response] = identifier[requests] . identifier[post] ( identifier[self] . identifier[base] + literal[string] , identifier[params] = identifier[params] , identifier[proxies] = identifier[self] . identifier[proxies] , identifier[timeout] = identifier[timeout] )
keyword[except] identifier[requests] . identifier[RequestException] keyword[as] identifier[e] :
keyword[return] identifier[dict] ( identifier[error] = identifier[str] ( identifier[e] ))
keyword[return] identifier[_return_response_and_status_code] ( identifier[response] ) | def put_comments(self, resource, comment, timeout=None):
""" Post a comment on a file or URL.
The initial idea of VirusTotal Community was that users should be able to make comments on files and URLs,
the comments may be malware analyses, false positive flags, disinfection instructions, etc.
Imagine you have some automatic setup that can produce interesting results related to a given sample or URL
that you submit to VirusTotal for antivirus characterization, you might want to give visibility to your setup
by automatically reviewing samples and URLs with the output of your automation.
:param resource: either a md5/sha1/sha256 hash of the file you want to review or the URL itself that you want
to comment on.
:param comment: the actual review, you can tag it using the "#" twitter-like syntax (e.g. #disinfection #zbot)
and reference users using the "@" syntax (e.g. @VirusTotalTeam).
:param timeout: The amount of time in seconds the request should wait before timing out.
:return: If the comment was successfully posted the response code will be 1, 0 otherwise.
"""
params = {'apikey': self.api_key, 'resource': resource, 'comment': comment}
try:
response = requests.post(self.base + 'comments/put', params=params, proxies=self.proxies, timeout=timeout) # depends on [control=['try'], data=[]]
except requests.RequestException as e:
return dict(error=str(e)) # depends on [control=['except'], data=['e']]
return _return_response_and_status_code(response) |
def asQuartusTcl(self, buff: List[str], version: str, component: "Component",
packager: "IpPackager", thisIf: 'Interface'):
"""
Add interface to Quartus tcl
:param buff: line buffer for output
:param version: Quartus version
:param intfName: name of top interface
:param component: component object from ipcore generator
:param packager: instance of IpPackager which is packagin current design
:param allInterfaces: list of all interfaces of top unit
:param thisIf: interface to add into Quartus TCL
"""
name = packager.getInterfaceLogicalName(thisIf)
self.quartus_tcl_add_interface(buff, thisIf, packager)
clk = thisIf._getAssociatedClk()
if clk is not None:
self.quartus_prop(buff, name, "associatedClock",
clk._sigInside.name, escapeStr=False)
rst = thisIf._getAssociatedRst()
if rst is not None:
self.quartus_prop(buff, name, "associatedReset",
rst._sigInside.name, escapeStr=False)
m = self.get_quartus_map()
if m:
intfMapOrName = m
else:
intfMapOrName = thisIf.name
self._asQuartusTcl(buff, version, name, component,
packager, thisIf, intfMapOrName) | def function[asQuartusTcl, parameter[self, buff, version, component, packager, thisIf]]:
constant[
Add interface to Quartus tcl
:param buff: line buffer for output
:param version: Quartus version
:param intfName: name of top interface
:param component: component object from ipcore generator
:param packager: instance of IpPackager which is packagin current design
:param allInterfaces: list of all interfaces of top unit
:param thisIf: interface to add into Quartus TCL
]
variable[name] assign[=] call[name[packager].getInterfaceLogicalName, parameter[name[thisIf]]]
call[name[self].quartus_tcl_add_interface, parameter[name[buff], name[thisIf], name[packager]]]
variable[clk] assign[=] call[name[thisIf]._getAssociatedClk, parameter[]]
if compare[name[clk] is_not constant[None]] begin[:]
call[name[self].quartus_prop, parameter[name[buff], name[name], constant[associatedClock], name[clk]._sigInside.name]]
variable[rst] assign[=] call[name[thisIf]._getAssociatedRst, parameter[]]
if compare[name[rst] is_not constant[None]] begin[:]
call[name[self].quartus_prop, parameter[name[buff], name[name], constant[associatedReset], name[rst]._sigInside.name]]
variable[m] assign[=] call[name[self].get_quartus_map, parameter[]]
if name[m] begin[:]
variable[intfMapOrName] assign[=] name[m]
call[name[self]._asQuartusTcl, parameter[name[buff], name[version], name[name], name[component], name[packager], name[thisIf], name[intfMapOrName]]] | keyword[def] identifier[asQuartusTcl] ( identifier[self] , identifier[buff] : identifier[List] [ identifier[str] ], identifier[version] : identifier[str] , identifier[component] : literal[string] ,
identifier[packager] : literal[string] , identifier[thisIf] : literal[string] ):
literal[string]
identifier[name] = identifier[packager] . identifier[getInterfaceLogicalName] ( identifier[thisIf] )
identifier[self] . identifier[quartus_tcl_add_interface] ( identifier[buff] , identifier[thisIf] , identifier[packager] )
identifier[clk] = identifier[thisIf] . identifier[_getAssociatedClk] ()
keyword[if] identifier[clk] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[quartus_prop] ( identifier[buff] , identifier[name] , literal[string] ,
identifier[clk] . identifier[_sigInside] . identifier[name] , identifier[escapeStr] = keyword[False] )
identifier[rst] = identifier[thisIf] . identifier[_getAssociatedRst] ()
keyword[if] identifier[rst] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[quartus_prop] ( identifier[buff] , identifier[name] , literal[string] ,
identifier[rst] . identifier[_sigInside] . identifier[name] , identifier[escapeStr] = keyword[False] )
identifier[m] = identifier[self] . identifier[get_quartus_map] ()
keyword[if] identifier[m] :
identifier[intfMapOrName] = identifier[m]
keyword[else] :
identifier[intfMapOrName] = identifier[thisIf] . identifier[name]
identifier[self] . identifier[_asQuartusTcl] ( identifier[buff] , identifier[version] , identifier[name] , identifier[component] ,
identifier[packager] , identifier[thisIf] , identifier[intfMapOrName] ) | def asQuartusTcl(self, buff: List[str], version: str, component: 'Component', packager: 'IpPackager', thisIf: 'Interface'):
"""
Add interface to Quartus tcl
:param buff: line buffer for output
:param version: Quartus version
:param intfName: name of top interface
:param component: component object from ipcore generator
:param packager: instance of IpPackager which is packagin current design
:param allInterfaces: list of all interfaces of top unit
:param thisIf: interface to add into Quartus TCL
"""
name = packager.getInterfaceLogicalName(thisIf)
self.quartus_tcl_add_interface(buff, thisIf, packager)
clk = thisIf._getAssociatedClk()
if clk is not None:
self.quartus_prop(buff, name, 'associatedClock', clk._sigInside.name, escapeStr=False) # depends on [control=['if'], data=['clk']]
rst = thisIf._getAssociatedRst()
if rst is not None:
self.quartus_prop(buff, name, 'associatedReset', rst._sigInside.name, escapeStr=False) # depends on [control=['if'], data=['rst']]
m = self.get_quartus_map()
if m:
intfMapOrName = m # depends on [control=['if'], data=[]]
else:
intfMapOrName = thisIf.name
self._asQuartusTcl(buff, version, name, component, packager, thisIf, intfMapOrName) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.