code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def pvector_field(item_type, optional=False, initial=()):
"""
Create checked ``PVector`` field.
:param item_type: The required type for the items in the vector.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPVector`` of the given type.
"""
return _sequence_field(CheckedPVector, item_type, optional,
initial) | def function[pvector_field, parameter[item_type, optional, initial]]:
constant[
Create checked ``PVector`` field.
:param item_type: The required type for the items in the vector.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPVector`` of the given type.
]
return[call[name[_sequence_field], parameter[name[CheckedPVector], name[item_type], name[optional], name[initial]]]] | keyword[def] identifier[pvector_field] ( identifier[item_type] , identifier[optional] = keyword[False] , identifier[initial] =()):
literal[string]
keyword[return] identifier[_sequence_field] ( identifier[CheckedPVector] , identifier[item_type] , identifier[optional] ,
identifier[initial] ) | def pvector_field(item_type, optional=False, initial=()):
"""
Create checked ``PVector`` field.
:param item_type: The required type for the items in the vector.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPVector`` of the given type.
"""
return _sequence_field(CheckedPVector, item_type, optional, initial) |
def generate_pdf_report(self, impact_function, iface, scenario_name):
"""Generate and store map and impact report from impact function.
Directory where the report stored is specified by user input from the
dialog. This function is adapted from analysis_utilities.py
:param impact_function: Impact Function.
:type impact_function: ImpactFunction()
:param iface: iface.
:type iface: iface
:param scenario_name: name of the scenario
:type scenario_name: str
"""
# output folder
output_dir = self.output_directory.text()
file_path = os.path.join(output_dir, scenario_name)
# create impact table report instance
table_report_metadata = ReportMetadata(
metadata_dict=standard_impact_report_metadata_pdf)
impact_table_report = ImpactReport(
iface,
table_report_metadata,
impact_function=impact_function)
impact_table_report.output_folder = file_path
impact_table_report.process_components()
# create impact map report instance
map_report_metadata = ReportMetadata(
metadata_dict=update_template_component(map_report))
impact_map_report = ImpactReport(
iface,
map_report_metadata,
impact_function=impact_function)
# TODO: Get from settings file
# get the extent of impact layer
impact_map_report.qgis_composition_context.extent = \
impact_function.impact.extent()
impact_map_report.output_folder = file_path
impact_map_report.process_components() | def function[generate_pdf_report, parameter[self, impact_function, iface, scenario_name]]:
constant[Generate and store map and impact report from impact function.
Directory where the report stored is specified by user input from the
dialog. This function is adapted from analysis_utilities.py
:param impact_function: Impact Function.
:type impact_function: ImpactFunction()
:param iface: iface.
:type iface: iface
:param scenario_name: name of the scenario
:type scenario_name: str
]
variable[output_dir] assign[=] call[name[self].output_directory.text, parameter[]]
variable[file_path] assign[=] call[name[os].path.join, parameter[name[output_dir], name[scenario_name]]]
variable[table_report_metadata] assign[=] call[name[ReportMetadata], parameter[]]
variable[impact_table_report] assign[=] call[name[ImpactReport], parameter[name[iface], name[table_report_metadata]]]
name[impact_table_report].output_folder assign[=] name[file_path]
call[name[impact_table_report].process_components, parameter[]]
variable[map_report_metadata] assign[=] call[name[ReportMetadata], parameter[]]
variable[impact_map_report] assign[=] call[name[ImpactReport], parameter[name[iface], name[map_report_metadata]]]
name[impact_map_report].qgis_composition_context.extent assign[=] call[name[impact_function].impact.extent, parameter[]]
name[impact_map_report].output_folder assign[=] name[file_path]
call[name[impact_map_report].process_components, parameter[]] | keyword[def] identifier[generate_pdf_report] ( identifier[self] , identifier[impact_function] , identifier[iface] , identifier[scenario_name] ):
literal[string]
identifier[output_dir] = identifier[self] . identifier[output_directory] . identifier[text] ()
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , identifier[scenario_name] )
identifier[table_report_metadata] = identifier[ReportMetadata] (
identifier[metadata_dict] = identifier[standard_impact_report_metadata_pdf] )
identifier[impact_table_report] = identifier[ImpactReport] (
identifier[iface] ,
identifier[table_report_metadata] ,
identifier[impact_function] = identifier[impact_function] )
identifier[impact_table_report] . identifier[output_folder] = identifier[file_path]
identifier[impact_table_report] . identifier[process_components] ()
identifier[map_report_metadata] = identifier[ReportMetadata] (
identifier[metadata_dict] = identifier[update_template_component] ( identifier[map_report] ))
identifier[impact_map_report] = identifier[ImpactReport] (
identifier[iface] ,
identifier[map_report_metadata] ,
identifier[impact_function] = identifier[impact_function] )
identifier[impact_map_report] . identifier[qgis_composition_context] . identifier[extent] = identifier[impact_function] . identifier[impact] . identifier[extent] ()
identifier[impact_map_report] . identifier[output_folder] = identifier[file_path]
identifier[impact_map_report] . identifier[process_components] () | def generate_pdf_report(self, impact_function, iface, scenario_name):
"""Generate and store map and impact report from impact function.
Directory where the report stored is specified by user input from the
dialog. This function is adapted from analysis_utilities.py
:param impact_function: Impact Function.
:type impact_function: ImpactFunction()
:param iface: iface.
:type iface: iface
:param scenario_name: name of the scenario
:type scenario_name: str
"""
# output folder
output_dir = self.output_directory.text()
file_path = os.path.join(output_dir, scenario_name)
# create impact table report instance
table_report_metadata = ReportMetadata(metadata_dict=standard_impact_report_metadata_pdf)
impact_table_report = ImpactReport(iface, table_report_metadata, impact_function=impact_function)
impact_table_report.output_folder = file_path
impact_table_report.process_components()
# create impact map report instance
map_report_metadata = ReportMetadata(metadata_dict=update_template_component(map_report))
impact_map_report = ImpactReport(iface, map_report_metadata, impact_function=impact_function)
# TODO: Get from settings file
# get the extent of impact layer
impact_map_report.qgis_composition_context.extent = impact_function.impact.extent()
impact_map_report.output_folder = file_path
impact_map_report.process_components() |
def create_from_header(cls, header, ebins=None, pixels=None):
""" Creates an HPX object from a FITS header.
header : The FITS header
ebins : Energy bin edges [optional]
"""
convname = HPX.identify_HPX_convention(header)
conv = HPX_FITS_CONVENTIONS[convname]
if conv.convname not in ['GALPROP', 'GALPROP2']:
if header["PIXTYPE"] != "HEALPIX":
raise Exception("PIXTYPE != HEALPIX")
if header["PIXTYPE"] != "HEALPIX":
raise Exception("PIXTYPE != HEALPIX")
if header["ORDERING"] == "RING":
nest = False
elif header["ORDERING"] == "NESTED":
nest = True
else:
raise Exception("ORDERING != RING | NESTED")
try:
order = header["ORDER"]
except KeyError:
order = -1
if order < 0:
nside = header["NSIDE"]
else:
nside = -1
try:
coordsys = header[conv.coordsys]
except KeyError:
coordsys = header['COORDSYS']
try:
region = header["HPX_REG"]
except KeyError:
try:
region = header["HPXREGION"]
except KeyError:
region = None
try:
if header['INDXSCHM'] in ['EXPLICIT', 'PARTIAL']:
use_pixels = pixels
else:
use_pixels = None
except KeyError:
use_pixels = None
return cls(nside, nest, coordsys, order, ebins, region=region, conv=conv, pixels=use_pixels) | def function[create_from_header, parameter[cls, header, ebins, pixels]]:
constant[ Creates an HPX object from a FITS header.
header : The FITS header
ebins : Energy bin edges [optional]
]
variable[convname] assign[=] call[name[HPX].identify_HPX_convention, parameter[name[header]]]
variable[conv] assign[=] call[name[HPX_FITS_CONVENTIONS]][name[convname]]
if compare[name[conv].convname <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18f00fc70>, <ast.Constant object at 0x7da18f00efb0>]]] begin[:]
if compare[call[name[header]][constant[PIXTYPE]] not_equal[!=] constant[HEALPIX]] begin[:]
<ast.Raise object at 0x7da18f00d9c0>
if compare[call[name[header]][constant[PIXTYPE]] not_equal[!=] constant[HEALPIX]] begin[:]
<ast.Raise object at 0x7da18f00f280>
if compare[call[name[header]][constant[ORDERING]] equal[==] constant[RING]] begin[:]
variable[nest] assign[=] constant[False]
<ast.Try object at 0x7da18f00e350>
if compare[name[order] less[<] constant[0]] begin[:]
variable[nside] assign[=] call[name[header]][constant[NSIDE]]
<ast.Try object at 0x7da18f00cfa0>
<ast.Try object at 0x7da18f00fd90>
<ast.Try object at 0x7da18f00fac0>
return[call[name[cls], parameter[name[nside], name[nest], name[coordsys], name[order], name[ebins]]]] | keyword[def] identifier[create_from_header] ( identifier[cls] , identifier[header] , identifier[ebins] = keyword[None] , identifier[pixels] = keyword[None] ):
literal[string]
identifier[convname] = identifier[HPX] . identifier[identify_HPX_convention] ( identifier[header] )
identifier[conv] = identifier[HPX_FITS_CONVENTIONS] [ identifier[convname] ]
keyword[if] identifier[conv] . identifier[convname] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] identifier[header] [ literal[string] ]!= literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[header] [ literal[string] ]!= literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[header] [ literal[string] ]== literal[string] :
identifier[nest] = keyword[False]
keyword[elif] identifier[header] [ literal[string] ]== literal[string] :
identifier[nest] = keyword[True]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[try] :
identifier[order] = identifier[header] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[order] =- literal[int]
keyword[if] identifier[order] < literal[int] :
identifier[nside] = identifier[header] [ literal[string] ]
keyword[else] :
identifier[nside] =- literal[int]
keyword[try] :
identifier[coordsys] = identifier[header] [ identifier[conv] . identifier[coordsys] ]
keyword[except] identifier[KeyError] :
identifier[coordsys] = identifier[header] [ literal[string] ]
keyword[try] :
identifier[region] = identifier[header] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[try] :
identifier[region] = identifier[header] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[region] = keyword[None]
keyword[try] :
keyword[if] identifier[header] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]:
identifier[use_pixels] = identifier[pixels]
keyword[else] :
identifier[use_pixels] = keyword[None]
keyword[except] identifier[KeyError] :
identifier[use_pixels] = keyword[None]
keyword[return] identifier[cls] ( identifier[nside] , identifier[nest] , identifier[coordsys] , identifier[order] , identifier[ebins] , identifier[region] = identifier[region] , identifier[conv] = identifier[conv] , identifier[pixels] = identifier[use_pixels] ) | def create_from_header(cls, header, ebins=None, pixels=None):
""" Creates an HPX object from a FITS header.
header : The FITS header
ebins : Energy bin edges [optional]
"""
convname = HPX.identify_HPX_convention(header)
conv = HPX_FITS_CONVENTIONS[convname]
if conv.convname not in ['GALPROP', 'GALPROP2']:
if header['PIXTYPE'] != 'HEALPIX':
raise Exception('PIXTYPE != HEALPIX') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if header['PIXTYPE'] != 'HEALPIX':
raise Exception('PIXTYPE != HEALPIX') # depends on [control=['if'], data=[]]
if header['ORDERING'] == 'RING':
nest = False # depends on [control=['if'], data=[]]
elif header['ORDERING'] == 'NESTED':
nest = True # depends on [control=['if'], data=[]]
else:
raise Exception('ORDERING != RING | NESTED')
try:
order = header['ORDER'] # depends on [control=['try'], data=[]]
except KeyError:
order = -1 # depends on [control=['except'], data=[]]
if order < 0:
nside = header['NSIDE'] # depends on [control=['if'], data=[]]
else:
nside = -1
try:
coordsys = header[conv.coordsys] # depends on [control=['try'], data=[]]
except KeyError:
coordsys = header['COORDSYS'] # depends on [control=['except'], data=[]]
try:
region = header['HPX_REG'] # depends on [control=['try'], data=[]]
except KeyError:
try:
region = header['HPXREGION'] # depends on [control=['try'], data=[]]
except KeyError:
region = None # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
try:
if header['INDXSCHM'] in ['EXPLICIT', 'PARTIAL']:
use_pixels = pixels # depends on [control=['if'], data=[]]
else:
use_pixels = None # depends on [control=['try'], data=[]]
except KeyError:
use_pixels = None # depends on [control=['except'], data=[]]
return cls(nside, nest, coordsys, order, ebins, region=region, conv=conv, pixels=use_pixels) |
def initialize(self, action):
# type: (CliContext, settings.TransferAction) -> None
"""Initialize context
:param CliContext self: this
:param settings.TransferAction action: transfer action
"""
self._init_config()
self.general_options = settings.create_general_options(
self.config, action)
self.credentials = settings.create_azure_storage_credentials(
self.config, self.general_options) | def function[initialize, parameter[self, action]]:
constant[Initialize context
:param CliContext self: this
:param settings.TransferAction action: transfer action
]
call[name[self]._init_config, parameter[]]
name[self].general_options assign[=] call[name[settings].create_general_options, parameter[name[self].config, name[action]]]
name[self].credentials assign[=] call[name[settings].create_azure_storage_credentials, parameter[name[self].config, name[self].general_options]] | keyword[def] identifier[initialize] ( identifier[self] , identifier[action] ):
literal[string]
identifier[self] . identifier[_init_config] ()
identifier[self] . identifier[general_options] = identifier[settings] . identifier[create_general_options] (
identifier[self] . identifier[config] , identifier[action] )
identifier[self] . identifier[credentials] = identifier[settings] . identifier[create_azure_storage_credentials] (
identifier[self] . identifier[config] , identifier[self] . identifier[general_options] ) | def initialize(self, action):
# type: (CliContext, settings.TransferAction) -> None
'Initialize context\n :param CliContext self: this\n :param settings.TransferAction action: transfer action\n '
self._init_config()
self.general_options = settings.create_general_options(self.config, action)
self.credentials = settings.create_azure_storage_credentials(self.config, self.general_options) |
def is_unzipped_x_duplicate(self, unzipped_x):
"""
param: unzipped_x: configuration assumed to be unzipped
"""
return self.is_zipped_x_duplicate(self.space.zip_inputs(np.atleast_2d(unzipped_x))) | def function[is_unzipped_x_duplicate, parameter[self, unzipped_x]]:
constant[
param: unzipped_x: configuration assumed to be unzipped
]
return[call[name[self].is_zipped_x_duplicate, parameter[call[name[self].space.zip_inputs, parameter[call[name[np].atleast_2d, parameter[name[unzipped_x]]]]]]]] | keyword[def] identifier[is_unzipped_x_duplicate] ( identifier[self] , identifier[unzipped_x] ):
literal[string]
keyword[return] identifier[self] . identifier[is_zipped_x_duplicate] ( identifier[self] . identifier[space] . identifier[zip_inputs] ( identifier[np] . identifier[atleast_2d] ( identifier[unzipped_x] ))) | def is_unzipped_x_duplicate(self, unzipped_x):
"""
param: unzipped_x: configuration assumed to be unzipped
"""
return self.is_zipped_x_duplicate(self.space.zip_inputs(np.atleast_2d(unzipped_x))) |
def category_playlists(self, category_id, limit=20, offset=0, country=None):
"""Get a list of Spotify playlists tagged with a particular category.
Parameters
----------
category_id : str
The Spotify category ID for the category.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
"""
route = Route('GET', '/browse/categories/{category_id}/playlists', category_id=category_id)
payload = {'limit': limit, 'offset': offset}
if country:
payload['country'] = country
return self.request(route, params=payload) | def function[category_playlists, parameter[self, category_id, limit, offset, country]]:
constant[Get a list of Spotify playlists tagged with a particular category.
Parameters
----------
category_id : str
The Spotify category ID for the category.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
]
variable[route] assign[=] call[name[Route], parameter[constant[GET], constant[/browse/categories/{category_id}/playlists]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da20e957d60>, <ast.Constant object at 0x7da20e956710>], [<ast.Name object at 0x7da20e957100>, <ast.Name object at 0x7da20e9550c0>]]
if name[country] begin[:]
call[name[payload]][constant[country]] assign[=] name[country]
return[call[name[self].request, parameter[name[route]]]] | keyword[def] identifier[category_playlists] ( identifier[self] , identifier[category_id] , identifier[limit] = literal[int] , identifier[offset] = literal[int] , identifier[country] = keyword[None] ):
literal[string]
identifier[route] = identifier[Route] ( literal[string] , literal[string] , identifier[category_id] = identifier[category_id] )
identifier[payload] ={ literal[string] : identifier[limit] , literal[string] : identifier[offset] }
keyword[if] identifier[country] :
identifier[payload] [ literal[string] ]= identifier[country]
keyword[return] identifier[self] . identifier[request] ( identifier[route] , identifier[params] = identifier[payload] ) | def category_playlists(self, category_id, limit=20, offset=0, country=None):
"""Get a list of Spotify playlists tagged with a particular category.
Parameters
----------
category_id : str
The Spotify category ID for the category.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
"""
route = Route('GET', '/browse/categories/{category_id}/playlists', category_id=category_id)
payload = {'limit': limit, 'offset': offset}
if country:
payload['country'] = country # depends on [control=['if'], data=[]]
return self.request(route, params=payload) |
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration) | def function[run_copy, parameter[self, source_project_dataset_tables, destination_project_dataset_table, write_disposition, create_disposition, labels]]:
constant[
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
]
variable[source_project_dataset_tables] assign[=] <ast.IfExp object at 0x7da1b0594700>
variable[source_project_dataset_tables_fixup] assign[=] list[[]]
for taget[name[source_project_dataset_table]] in starred[name[source_project_dataset_tables]] begin[:]
<ast.Tuple object at 0x7da1b0594d30> assign[=] call[name[_split_tablename], parameter[]]
call[name[source_project_dataset_tables_fixup].append, parameter[dictionary[[<ast.Constant object at 0x7da1b05943d0>, <ast.Constant object at 0x7da1b0594d90>, <ast.Constant object at 0x7da20e963610>], [<ast.Name object at 0x7da20e9631f0>, <ast.Name object at 0x7da20e960fd0>, <ast.Name object at 0x7da20e9608b0>]]]]
<ast.Tuple object at 0x7da20e9617e0> assign[=] call[name[_split_tablename], parameter[]]
variable[configuration] assign[=] dictionary[[<ast.Constant object at 0x7da1b052b0a0>], [<ast.Dict object at 0x7da1b052b6a0>]]
if name[labels] begin[:]
call[name[configuration]][constant[labels]] assign[=] name[labels]
return[call[name[self].run_with_configuration, parameter[name[configuration]]]] | keyword[def] identifier[run_copy] ( identifier[self] ,
identifier[source_project_dataset_tables] ,
identifier[destination_project_dataset_table] ,
identifier[write_disposition] = literal[string] ,
identifier[create_disposition] = literal[string] ,
identifier[labels] = keyword[None] ):
literal[string]
identifier[source_project_dataset_tables] =([
identifier[source_project_dataset_tables]
] keyword[if] keyword[not] identifier[isinstance] ( identifier[source_project_dataset_tables] , identifier[list] ) keyword[else]
identifier[source_project_dataset_tables] )
identifier[source_project_dataset_tables_fixup] =[]
keyword[for] identifier[source_project_dataset_table] keyword[in] identifier[source_project_dataset_tables] :
identifier[source_project] , identifier[source_dataset] , identifier[source_table] = identifier[_split_tablename] ( identifier[table_input] = identifier[source_project_dataset_table] ,
identifier[default_project_id] = identifier[self] . identifier[project_id] ,
identifier[var_name] = literal[string] )
identifier[source_project_dataset_tables_fixup] . identifier[append] ({
literal[string] :
identifier[source_project] ,
literal[string] :
identifier[source_dataset] ,
literal[string] :
identifier[source_table]
})
identifier[destination_project] , identifier[destination_dataset] , identifier[destination_table] = identifier[_split_tablename] ( identifier[table_input] = identifier[destination_project_dataset_table] ,
identifier[default_project_id] = identifier[self] . identifier[project_id] )
identifier[configuration] ={
literal[string] :{
literal[string] : identifier[create_disposition] ,
literal[string] : identifier[write_disposition] ,
literal[string] : identifier[source_project_dataset_tables_fixup] ,
literal[string] :{
literal[string] : identifier[destination_project] ,
literal[string] : identifier[destination_dataset] ,
literal[string] : identifier[destination_table]
}
}
}
keyword[if] identifier[labels] :
identifier[configuration] [ literal[string] ]= identifier[labels]
keyword[return] identifier[self] . identifier[run_with_configuration] ( identifier[configuration] ) | def run_copy(self, source_project_dataset_tables, destination_project_dataset_table, write_disposition='WRITE_EMPTY', create_disposition='CREATE_IF_NEEDED', labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = [source_project_dataset_tables] if not isinstance(source_project_dataset_tables, list) else source_project_dataset_tables
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
(source_project, source_dataset, source_table) = _split_tablename(table_input=source_project_dataset_table, default_project_id=self.project_id, var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({'projectId': source_project, 'datasetId': source_dataset, 'tableId': source_table}) # depends on [control=['for'], data=['source_project_dataset_table']]
(destination_project, destination_dataset, destination_table) = _split_tablename(table_input=destination_project_dataset_table, default_project_id=self.project_id)
configuration = {'copy': {'createDisposition': create_disposition, 'writeDisposition': write_disposition, 'sourceTables': source_project_dataset_tables_fixup, 'destinationTable': {'projectId': destination_project, 'datasetId': destination_dataset, 'tableId': destination_table}}}
if labels:
configuration['labels'] = labels # depends on [control=['if'], data=[]]
return self.run_with_configuration(configuration) |
def downgrade(directory, sql, tag, x_arg, revision):
"""Revert to a previous version"""
_downgrade(directory, revision, sql, tag, x_arg) | def function[downgrade, parameter[directory, sql, tag, x_arg, revision]]:
constant[Revert to a previous version]
call[name[_downgrade], parameter[name[directory], name[revision], name[sql], name[tag], name[x_arg]]] | keyword[def] identifier[downgrade] ( identifier[directory] , identifier[sql] , identifier[tag] , identifier[x_arg] , identifier[revision] ):
literal[string]
identifier[_downgrade] ( identifier[directory] , identifier[revision] , identifier[sql] , identifier[tag] , identifier[x_arg] ) | def downgrade(directory, sql, tag, x_arg, revision):
"""Revert to a previous version"""
_downgrade(directory, revision, sql, tag, x_arg) |
def swo_stop(self):
"""! @brief Stop receiving SWO data."""
try:
self._link.swo_configure(False, 0)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | def function[swo_stop, parameter[self]]:
constant[! @brief Stop receiving SWO data.]
<ast.Try object at 0x7da204566890> | keyword[def] identifier[swo_stop] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_link] . identifier[swo_configure] ( keyword[False] , literal[int] )
keyword[except] identifier[DAPAccess] . identifier[Error] keyword[as] identifier[exc] :
identifier[six] . identifier[raise_from] ( identifier[self] . identifier[_convert_exception] ( identifier[exc] ), identifier[exc] ) | def swo_stop(self):
"""! @brief Stop receiving SWO data."""
try:
self._link.swo_configure(False, 0) # depends on [control=['try'], data=[]]
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) # depends on [control=['except'], data=['exc']] |
def dump(
state, host,
remote_filename, database=None,
# Details for speaking to PostgreSQL via `psql` CLI
postgresql_user=None, postgresql_password=None,
postgresql_host=None, postgresql_port=None,
):
'''
Dump a PostgreSQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ postgresql_*: global module arguments, see above
'''
yield '{0} > {1}'.format(make_psql_command(
executable='pg_dump',
database=database,
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
), remote_filename) | def function[dump, parameter[state, host, remote_filename, database, postgresql_user, postgresql_password, postgresql_host, postgresql_port]]:
constant[
Dump a PostgreSQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ postgresql_*: global module arguments, see above
]
<ast.Yield object at 0x7da204566740> | keyword[def] identifier[dump] (
identifier[state] , identifier[host] ,
identifier[remote_filename] , identifier[database] = keyword[None] ,
identifier[postgresql_user] = keyword[None] , identifier[postgresql_password] = keyword[None] ,
identifier[postgresql_host] = keyword[None] , identifier[postgresql_port] = keyword[None] ,
):
literal[string]
keyword[yield] literal[string] . identifier[format] ( identifier[make_psql_command] (
identifier[executable] = literal[string] ,
identifier[database] = identifier[database] ,
identifier[user] = identifier[postgresql_user] ,
identifier[password] = identifier[postgresql_password] ,
identifier[host] = identifier[postgresql_host] ,
identifier[port] = identifier[postgresql_port] ,
), identifier[remote_filename] ) | def dump(state, host, remote_filename, database=None, postgresql_user=None, postgresql_password=None, postgresql_host=None, postgresql_port=None):
# Details for speaking to PostgreSQL via `psql` CLI
'\n Dump a PostgreSQL database into a ``.sql`` file. Requires ``mysqldump``.\n\n + database: name of the database to dump\n + remote_filename: name of the file to dump the SQL to\n + postgresql_*: global module arguments, see above\n '
yield '{0} > {1}'.format(make_psql_command(executable='pg_dump', database=database, user=postgresql_user, password=postgresql_password, host=postgresql_host, port=postgresql_port), remote_filename) |
def get_api_items(api_doc_fd):
    """
    Yield every object listed in the public API documentation page.

    Walks the api.rst table of contents line by line, tracking the active
    module (``.. currentmodule::`` directives), the current section and
    subsection (header lines underlined with ``-`` / ``~``) and the
    ``.. autosummary::`` item lists, resolving each listed item to the
    actual Python object.

    Parameters
    ----------
    api_doc_fd : file descriptor
        A file descriptor (or any iterable of lines) of the API
        documentation page, containing the table of contents with all
        the public API.

    Yields
    ------
    name : str
        The fully qualified name of the object
        (e.g. 'pandas.Series.str.upper').
    func : function
        The object itself. In most cases this will be a function or
        method, but it can also be classes, properties, cython objects...
    section : str
        The name of the section in the API page where the object item is
        located.
    subsection : str
        The name of the subsection in the API page where the object item
        is located.
    """
    module_name = 'pandas'
    prev = section = subsection = ''
    state = None
    for raw in api_doc_fd:
        stripped = raw.strip()
        # A run of '-' or '~' exactly as long as the previous line is an
        # RST underline: it turns that previous line into a (sub)section
        # title.
        if len(stripped) == len(prev):
            if set(stripped) == set('-'):
                section = prev
                continue
            if set(stripped) == set('~'):
                subsection = prev
                continue
        if stripped.startswith('.. currentmodule::'):
            module_name = stripped.replace('.. currentmodule::', '').strip()
            continue
        if stripped == '.. autosummary::':
            state = 'autosummary'
            continue
        if state == 'autosummary':
            # The directive is followed by a blank line, after which the
            # item names begin.
            if stripped == '':
                state = 'items'
                continue
        if state == 'items':
            # A blank line terminates the autosummary item list.
            if stripped == '':
                state = None
                continue
            # Resolve the dotted item name relative to the active module.
            obj = importlib.import_module(module_name)
            for attr in stripped.split('.'):
                obj = getattr(obj, attr)
            yield ('.'.join([module_name, stripped]), obj,
                   section, subsection)
        prev = stripped
constant[
Yield information about all public API items.
Parse api.rst file from the documentation, and extract all the functions,
methods, classes, attributes... This should include all pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
The name of the object (e.g. 'pandas.Series.str.upper).
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
]
variable[current_module] assign[=] constant[pandas]
variable[previous_line] assign[=] constant[]
variable[position] assign[=] constant[None]
for taget[name[line]] in starred[name[api_doc_fd]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if compare[call[name[len], parameter[name[line]]] equal[==] call[name[len], parameter[name[previous_line]]]] begin[:]
if compare[call[name[set], parameter[name[line]]] equal[==] call[name[set], parameter[constant[-]]]] begin[:]
variable[current_section] assign[=] name[previous_line]
continue
if compare[call[name[set], parameter[name[line]]] equal[==] call[name[set], parameter[constant[~]]]] begin[:]
variable[current_subsection] assign[=] name[previous_line]
continue
if call[name[line].startswith, parameter[constant[.. currentmodule::]]] begin[:]
variable[current_module] assign[=] call[call[name[line].replace, parameter[constant[.. currentmodule::], constant[]]].strip, parameter[]]
continue
if compare[name[line] equal[==] constant[.. autosummary::]] begin[:]
variable[position] assign[=] constant[autosummary]
continue
if compare[name[position] equal[==] constant[autosummary]] begin[:]
if compare[name[line] equal[==] constant[]] begin[:]
variable[position] assign[=] constant[items]
continue
if compare[name[position] equal[==] constant[items]] begin[:]
if compare[name[line] equal[==] constant[]] begin[:]
variable[position] assign[=] constant[None]
continue
variable[item] assign[=] call[name[line].strip, parameter[]]
variable[func] assign[=] call[name[importlib].import_module, parameter[name[current_module]]]
for taget[name[part]] in starred[call[name[item].split, parameter[constant[.]]]] begin[:]
variable[func] assign[=] call[name[getattr], parameter[name[func], name[part]]]
<ast.Yield object at 0x7da1b1dd9c00>
variable[previous_line] assign[=] name[line] | keyword[def] identifier[get_api_items] ( identifier[api_doc_fd] ):
literal[string]
identifier[current_module] = literal[string]
identifier[previous_line] = identifier[current_section] = identifier[current_subsection] = literal[string]
identifier[position] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[api_doc_fd] :
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[len] ( identifier[line] )== identifier[len] ( identifier[previous_line] ):
keyword[if] identifier[set] ( identifier[line] )== identifier[set] ( literal[string] ):
identifier[current_section] = identifier[previous_line]
keyword[continue]
keyword[if] identifier[set] ( identifier[line] )== identifier[set] ( literal[string] ):
identifier[current_subsection] = identifier[previous_line]
keyword[continue]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[current_module] = identifier[line] . identifier[replace] ( literal[string] , literal[string] ). identifier[strip] ()
keyword[continue]
keyword[if] identifier[line] == literal[string] :
identifier[position] = literal[string]
keyword[continue]
keyword[if] identifier[position] == literal[string] :
keyword[if] identifier[line] == literal[string] :
identifier[position] = literal[string]
keyword[continue]
keyword[if] identifier[position] == literal[string] :
keyword[if] identifier[line] == literal[string] :
identifier[position] = keyword[None]
keyword[continue]
identifier[item] = identifier[line] . identifier[strip] ()
identifier[func] = identifier[importlib] . identifier[import_module] ( identifier[current_module] )
keyword[for] identifier[part] keyword[in] identifier[item] . identifier[split] ( literal[string] ):
identifier[func] = identifier[getattr] ( identifier[func] , identifier[part] )
keyword[yield] ( literal[string] . identifier[join] ([ identifier[current_module] , identifier[item] ]), identifier[func] ,
identifier[current_section] , identifier[current_subsection] )
identifier[previous_line] = identifier[line] | def get_api_items(api_doc_fd):
"""
Yield information about all public API items.
Parse api.rst file from the documentation, and extract all the functions,
methods, classes, attributes... This should include all pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
The name of the object (e.g. 'pandas.Series.str.upper).
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
"""
current_module = 'pandas'
previous_line = current_section = current_subsection = ''
position = None
for line in api_doc_fd:
line = line.strip()
if len(line) == len(previous_line):
if set(line) == set('-'):
current_section = previous_line
continue # depends on [control=['if'], data=[]]
if set(line) == set('~'):
current_subsection = previous_line
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if line.startswith('.. currentmodule::'):
current_module = line.replace('.. currentmodule::', '').strip()
continue # depends on [control=['if'], data=[]]
if line == '.. autosummary::':
position = 'autosummary'
continue # depends on [control=['if'], data=[]]
if position == 'autosummary':
if line == '':
position = 'items'
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['position']]
if position == 'items':
if line == '':
position = None
continue # depends on [control=['if'], data=[]]
item = line.strip()
func = importlib.import_module(current_module)
for part in item.split('.'):
func = getattr(func, part) # depends on [control=['for'], data=['part']]
yield ('.'.join([current_module, item]), func, current_section, current_subsection) # depends on [control=['if'], data=['position']]
previous_line = line # depends on [control=['for'], data=['line']] |
def shake_shake_skip_connection(x, output_filters, stride, is_training):
    """Projects x onto `output_filters` channels for a shake-shake residual.

    When the input channel count already matches `output_filters`, x is
    returned unchanged. Otherwise two strided average-pool + 1x1-conv
    branches (the second sampled one pixel off via pad-then-crop) are
    concatenated along channels and batch-normalized.
    """
    in_filters = common_layers.shape_list(x)[-1]
    if in_filters == output_filters:
        return x

    pool_strides = [1, stride, stride, 1]

    # First branch: strided average pool, then a 1x1 projection to half
    # the target channel count.
    branch_a = tf.nn.avg_pool(x, [1, 1, 1, 1], pool_strides, "VALID")
    branch_a = tf.layers.conv2d(
        branch_a, int(output_filters / 2), (1, 1), padding="SAME",
        name="path1_conv")

    # Second branch: shift the input by one pixel (pad with zeros, then
    # crop) so the two branches sample different spatial positions.
    shifted = tf.pad(x, [[0, 0], [0, 1], [0, 1], [0, 0]])[:, 1:, 1:, :]
    branch_b = tf.nn.avg_pool(shifted, [1, 1, 1, 1], pool_strides, "VALID")
    branch_b = tf.layers.conv2d(
        branch_b, int(output_filters / 2), (1, 1), padding="SAME",
        name="path2_conv")

    # Join both halves along the channel axis and normalize.
    joined = tf.concat(values=[branch_a, branch_b], axis=-1)
    return tf.layers.batch_normalization(
        joined, training=is_training, name="final_path_bn")
constant[Adds a residual connection to the filter x for the shake-shake model.]
variable[curr_filters] assign[=] call[call[name[common_layers].shape_list, parameter[name[x]]]][<ast.UnaryOp object at 0x7da1b201e320>]
if compare[name[curr_filters] equal[==] name[output_filters]] begin[:]
return[name[x]]
variable[stride_spec] assign[=] list[[<ast.Constant object at 0x7da1b201e3b0>, <ast.Name object at 0x7da1b201ce20>, <ast.Name object at 0x7da1b201e1d0>, <ast.Constant object at 0x7da1b201e530>]]
variable[path1] assign[=] call[name[tf].nn.avg_pool, parameter[name[x], list[[<ast.Constant object at 0x7da1b201d4b0>, <ast.Constant object at 0x7da1b201e860>, <ast.Constant object at 0x7da1b201fbe0>, <ast.Constant object at 0x7da1b201e6b0>]], name[stride_spec], constant[VALID]]]
variable[path1] assign[=] call[name[tf].layers.conv2d, parameter[name[path1], call[name[int], parameter[binary_operation[name[output_filters] / constant[2]]]], tuple[[<ast.Constant object at 0x7da1b201ffd0>, <ast.Constant object at 0x7da1b201e7a0>]]]]
variable[pad_arr] assign[=] list[[<ast.List object at 0x7da1b201c880>, <ast.List object at 0x7da1b201c460>, <ast.List object at 0x7da1b201d960>, <ast.List object at 0x7da1b201d060>]]
variable[path2] assign[=] call[call[name[tf].pad, parameter[name[x], name[pad_arr]]]][tuple[[<ast.Slice object at 0x7da1b201d1b0>, <ast.Slice object at 0x7da1b201f280>, <ast.Slice object at 0x7da1b201c0a0>, <ast.Slice object at 0x7da1b201dc60>]]]
variable[path2] assign[=] call[name[tf].nn.avg_pool, parameter[name[path2], list[[<ast.Constant object at 0x7da1b201f190>, <ast.Constant object at 0x7da1b201eec0>, <ast.Constant object at 0x7da1b201d2d0>, <ast.Constant object at 0x7da1b201d2a0>]], name[stride_spec], constant[VALID]]]
variable[path2] assign[=] call[name[tf].layers.conv2d, parameter[name[path2], call[name[int], parameter[binary_operation[name[output_filters] / constant[2]]]], tuple[[<ast.Constant object at 0x7da1b201ce80>, <ast.Constant object at 0x7da1b201c2b0>]]]]
variable[final_path] assign[=] call[name[tf].concat, parameter[]]
variable[final_path] assign[=] call[name[tf].layers.batch_normalization, parameter[name[final_path]]]
return[name[final_path]] | keyword[def] identifier[shake_shake_skip_connection] ( identifier[x] , identifier[output_filters] , identifier[stride] , identifier[is_training] ):
literal[string]
identifier[curr_filters] = identifier[common_layers] . identifier[shape_list] ( identifier[x] )[- literal[int] ]
keyword[if] identifier[curr_filters] == identifier[output_filters] :
keyword[return] identifier[x]
identifier[stride_spec] =[ literal[int] , identifier[stride] , identifier[stride] , literal[int] ]
identifier[path1] = identifier[tf] . identifier[nn] . identifier[avg_pool] ( identifier[x] ,[ literal[int] , literal[int] , literal[int] , literal[int] ], identifier[stride_spec] , literal[string] )
identifier[path1] = identifier[tf] . identifier[layers] . identifier[conv2d] (
identifier[path1] , identifier[int] ( identifier[output_filters] / literal[int] ),( literal[int] , literal[int] ), identifier[padding] = literal[string] , identifier[name] = literal[string] )
identifier[pad_arr] =[[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] , literal[int] ]]
identifier[path2] = identifier[tf] . identifier[pad] ( identifier[x] , identifier[pad_arr] )[:, literal[int] :, literal[int] :,:]
identifier[path2] = identifier[tf] . identifier[nn] . identifier[avg_pool] ( identifier[path2] ,[ literal[int] , literal[int] , literal[int] , literal[int] ], identifier[stride_spec] , literal[string] )
identifier[path2] = identifier[tf] . identifier[layers] . identifier[conv2d] (
identifier[path2] , identifier[int] ( identifier[output_filters] / literal[int] ),( literal[int] , literal[int] ), identifier[padding] = literal[string] , identifier[name] = literal[string] )
identifier[final_path] = identifier[tf] . identifier[concat] ( identifier[values] =[ identifier[path1] , identifier[path2] ], identifier[axis] =- literal[int] )
identifier[final_path] = identifier[tf] . identifier[layers] . identifier[batch_normalization] (
identifier[final_path] , identifier[training] = identifier[is_training] , identifier[name] = literal[string] )
keyword[return] identifier[final_path] | def shake_shake_skip_connection(x, output_filters, stride, is_training):
"""Adds a residual connection to the filter x for the shake-shake model."""
curr_filters = common_layers.shape_list(x)[-1]
if curr_filters == output_filters:
return x # depends on [control=['if'], data=[]]
stride_spec = [1, stride, stride, 1]
# Skip path 1.
path1 = tf.nn.avg_pool(x, [1, 1, 1, 1], stride_spec, 'VALID')
path1 = tf.layers.conv2d(path1, int(output_filters / 2), (1, 1), padding='SAME', name='path1_conv')
# Skip path 2.
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]] # First pad with 0's then crop.
path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :]
path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, 'VALID')
path2 = tf.layers.conv2d(path2, int(output_filters / 2), (1, 1), padding='SAME', name='path2_conv')
# Concat and apply BN.
final_path = tf.concat(values=[path1, path2], axis=-1)
final_path = tf.layers.batch_normalization(final_path, training=is_training, name='final_path_bn')
return final_path |
def deserialize(self, jwt, key=None):
    """Deserialize a JWT token.

    NOTE: Destroys any current status and tries to import the raw
    token provided.

    :param jwt: a 'raw' JWT token, in compact JWS (two dots) or
        compact JWE (four dots) serialization.
    :param key: A (:class:`jwcrypto.jwk.JWK`) verification or
        decryption key, or a (:class:`jwcrypto.jwk.JWKSet`) that
        contains a key indexed by the 'kid' header.
    :raises ValueError: if the token is neither a compact JWS nor a
        compact JWE, or if `key` is not a JWK, a JWKSet or None.
    :raises JWTMissingKey: if a JWKSet is provided and no key in the
        set can process the token.
    """
    # The number of '.' separators distinguishes the two compact
    # serializations: 3 segments (JWS) vs 5 segments (JWE).
    c = jwt.count('.')
    if c == 2:
        self.token = JWS()
    elif c == 4:
        self.token = JWE()
    else:
        raise ValueError("Token format unrecognized")
    # Apply algs restrictions if any, before performing any operation
    if self._algs:
        self.token.allowed_algs = self._algs
    # Per-key attempt log ('Success' or a failure entry for each key
    # tried); consulted below by the multi-key fallback loop.
    self.deserializelog = list()
    # now deserialize and also decrypt/verify (or raise) if we
    # have a key
    if key is None:
        self.token.deserialize(jwt, None)
    elif isinstance(key, JWK):
        self.token.deserialize(jwt, key)
        self.deserializelog.append("Success")
    elif isinstance(key, JWKSet):
        # First parse without a key so the JOSE header (and its 'kid',
        # if present) becomes available for key selection.
        self.token.deserialize(jwt, None)
        if 'kid' in self.token.jose_header:
            kid_key = key.get_key(self.token.jose_header['kid'])
            if not kid_key:
                raise JWTMissingKey('Key ID %s not in key set'
                                    % self.token.jose_header['kid'])
            self.token.deserialize(jwt, kid_key)
        else:
            # No 'kid' hint: try every key in the set until one
            # verifies/decrypts, logging each failure.
            for k in key:
                try:
                    self.token.deserialize(jwt, k)
                    self.deserializelog.append("Success")
                    break
                except Exception as e:  # pylint: disable=broad-except
                    keyid = k.key_id
                    if keyid is None:
                        keyid = k.thumbprint()
                    self.deserializelog.append('Key [%s] failed: [%s]' % (
                        keyid, repr(e)))
                    continue
            if "Success" not in self.deserializelog:
                raise JWTMissingKey('No working key found in key set')
    else:
        raise ValueError("Unrecognized Key Type")
    # Header and claims are only exposed once the token has actually
    # been verified/decrypted with a key.
    if key is not None:
        self.header = self.token.jose_header
        self.claims = self.token.payload.decode('utf-8')
        self._check_provided_claims()
constant[Deserialize a JWT token.
NOTE: Destroys any current status and tries to import the raw
token provided.
:param jwt: a 'raw' JWT token.
:param key: A (:class:`jwcrypto.jwk.JWK`) verification or
decryption key, or a (:class:`jwcrypto.jwk.JWKSet`) that
contains a key indexed by the 'kid' header.
]
variable[c] assign[=] call[name[jwt].count, parameter[constant[.]]]
if compare[name[c] equal[==] constant[2]] begin[:]
name[self].token assign[=] call[name[JWS], parameter[]]
if name[self]._algs begin[:]
name[self].token.allowed_algs assign[=] name[self]._algs
name[self].deserializelog assign[=] call[name[list], parameter[]]
if compare[name[key] is constant[None]] begin[:]
call[name[self].token.deserialize, parameter[name[jwt], constant[None]]]
if compare[name[key] is_not constant[None]] begin[:]
name[self].header assign[=] name[self].token.jose_header
name[self].claims assign[=] call[name[self].token.payload.decode, parameter[constant[utf-8]]]
call[name[self]._check_provided_claims, parameter[]] | keyword[def] identifier[deserialize] ( identifier[self] , identifier[jwt] , identifier[key] = keyword[None] ):
literal[string]
identifier[c] = identifier[jwt] . identifier[count] ( literal[string] )
keyword[if] identifier[c] == literal[int] :
identifier[self] . identifier[token] = identifier[JWS] ()
keyword[elif] identifier[c] == literal[int] :
identifier[self] . identifier[token] = identifier[JWE] ()
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[_algs] :
identifier[self] . identifier[token] . identifier[allowed_algs] = identifier[self] . identifier[_algs]
identifier[self] . identifier[deserializelog] = identifier[list] ()
keyword[if] identifier[key] keyword[is] keyword[None] :
identifier[self] . identifier[token] . identifier[deserialize] ( identifier[jwt] , keyword[None] )
keyword[elif] identifier[isinstance] ( identifier[key] , identifier[JWK] ):
identifier[self] . identifier[token] . identifier[deserialize] ( identifier[jwt] , identifier[key] )
identifier[self] . identifier[deserializelog] . identifier[append] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[key] , identifier[JWKSet] ):
identifier[self] . identifier[token] . identifier[deserialize] ( identifier[jwt] , keyword[None] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[token] . identifier[jose_header] :
identifier[kid_key] = identifier[key] . identifier[get_key] ( identifier[self] . identifier[token] . identifier[jose_header] [ literal[string] ])
keyword[if] keyword[not] identifier[kid_key] :
keyword[raise] identifier[JWTMissingKey] ( literal[string]
% identifier[self] . identifier[token] . identifier[jose_header] [ literal[string] ])
identifier[self] . identifier[token] . identifier[deserialize] ( identifier[jwt] , identifier[kid_key] )
keyword[else] :
keyword[for] identifier[k] keyword[in] identifier[key] :
keyword[try] :
identifier[self] . identifier[token] . identifier[deserialize] ( identifier[jwt] , identifier[k] )
identifier[self] . identifier[deserializelog] . identifier[append] ( literal[string] )
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[keyid] = identifier[k] . identifier[key_id]
keyword[if] identifier[keyid] keyword[is] keyword[None] :
identifier[keyid] = identifier[k] . identifier[thumbprint] ()
identifier[self] . identifier[deserializelog] . identifier[append] ( literal[string] %(
identifier[keyid] , identifier[repr] ( identifier[e] )))
keyword[continue]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[deserializelog] :
keyword[raise] identifier[JWTMissingKey] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[key] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[header] = identifier[self] . identifier[token] . identifier[jose_header]
identifier[self] . identifier[claims] = identifier[self] . identifier[token] . identifier[payload] . identifier[decode] ( literal[string] )
identifier[self] . identifier[_check_provided_claims] () | def deserialize(self, jwt, key=None):
"""Deserialize a JWT token.
NOTE: Destroys any current status and tries to import the raw
token provided.
:param jwt: a 'raw' JWT token.
:param key: A (:class:`jwcrypto.jwk.JWK`) verification or
decryption key, or a (:class:`jwcrypto.jwk.JWKSet`) that
contains a key indexed by the 'kid' header.
"""
c = jwt.count('.')
if c == 2:
self.token = JWS() # depends on [control=['if'], data=[]]
elif c == 4:
self.token = JWE() # depends on [control=['if'], data=[]]
else:
raise ValueError('Token format unrecognized')
# Apply algs restrictions if any, before performing any operation
if self._algs:
self.token.allowed_algs = self._algs # depends on [control=['if'], data=[]]
self.deserializelog = list()
# now deserialize and also decrypt/verify (or raise) if we
# have a key
if key is None:
self.token.deserialize(jwt, None) # depends on [control=['if'], data=[]]
elif isinstance(key, JWK):
self.token.deserialize(jwt, key)
self.deserializelog.append('Success') # depends on [control=['if'], data=[]]
elif isinstance(key, JWKSet):
self.token.deserialize(jwt, None)
if 'kid' in self.token.jose_header:
kid_key = key.get_key(self.token.jose_header['kid'])
if not kid_key:
raise JWTMissingKey('Key ID %s not in key set' % self.token.jose_header['kid']) # depends on [control=['if'], data=[]]
self.token.deserialize(jwt, kid_key) # depends on [control=['if'], data=[]]
else:
for k in key:
try:
self.token.deserialize(jwt, k)
self.deserializelog.append('Success')
break # depends on [control=['try'], data=[]]
except Exception as e: # pylint: disable=broad-except
keyid = k.key_id
if keyid is None:
keyid = k.thumbprint() # depends on [control=['if'], data=['keyid']]
self.deserializelog.append('Key [%s] failed: [%s]' % (keyid, repr(e)))
continue # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['k']]
if 'Success' not in self.deserializelog:
raise JWTMissingKey('No working key found in key set') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('Unrecognized Key Type')
if key is not None:
self.header = self.token.jose_header
self.claims = self.token.payload.decode('utf-8')
self._check_provided_claims() # depends on [control=['if'], data=[]] |
def V_hollow_cylinder(Di, Do, L):
    r'''Returns the volume of a hollow cylinder.

    Computed as the full outer cylinder minus the bore:

    .. math::
        V = \frac{\pi D_o^2}{4}L - L\frac{\pi D_i^2}{4}

    Parameters
    ----------
    Di : float
        Diameter of the hollow in the cylinder, [m]
    Do : float
        Diameter of the exterior of the cylinder, [m]
    L : float
        Length of the cylinder, [m]

    Returns
    -------
    V : float
        Volume [m^3]

    Examples
    --------
    >>> V_hollow_cylinder(0.005, 0.01, 0.1)
    5.890486225480862e-06
    '''
    outer = pi*Do**2/4*L
    bore = pi*Di**2/4*L
    return outer - bore
constant[Returns the volume of a hollow cylinder.
.. math::
V = \frac{\pi D_o^2}{4}L - L\frac{\pi D_i^2}{4}
Parameters
----------
Di : float
Diameter of the hollow in the cylinder, [m]
Do : float
Diameter of the exterior of the cylinder, [m]
L : float
Length of the cylinder, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
>>> V_hollow_cylinder(0.005, 0.01, 0.1)
5.890486225480862e-06
]
variable[V] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[pi] * binary_operation[name[Do] ** constant[2]]] / constant[4]] * name[L]] - binary_operation[binary_operation[binary_operation[name[pi] * binary_operation[name[Di] ** constant[2]]] / constant[4]] * name[L]]]
return[name[V]] | keyword[def] identifier[V_hollow_cylinder] ( identifier[Di] , identifier[Do] , identifier[L] ):
literal[string]
identifier[V] = identifier[pi] * identifier[Do] ** literal[int] / literal[int] * identifier[L] - identifier[pi] * identifier[Di] ** literal[int] / literal[int] * identifier[L]
keyword[return] identifier[V] | def V_hollow_cylinder(Di, Do, L):
"""Returns the volume of a hollow cylinder.
.. math::
V = \\frac{\\pi D_o^2}{4}L - L\\frac{\\pi D_i^2}{4}
Parameters
----------
Di : float
Diameter of the hollow in the cylinder, [m]
Do : float
Diameter of the exterior of the cylinder, [m]
L : float
Length of the cylinder, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
>>> V_hollow_cylinder(0.005, 0.01, 0.1)
5.890486225480862e-06
"""
V = pi * Do ** 2 / 4 * L - pi * Di ** 2 / 4 * L
return V |
def set_orthogonal_selection(self, selection, value, fields=None):
    """Write `value` into the region picked by an orthogonal (outer) selection.

    Parameters
    ----------
    selection : tuple
        One selection per dimension of the array; each may be any
        combination of int, slice, integer array or Boolean array. The
        affected region is the outer product of the per-dimension
        selections.
    value : scalar or array-like
        Value to be stored into the array.
    fields : str or sequence of str, optional
        For arrays with a structured dtype, one or more fields can be
        specified to set data for.

    Notes
    -----
    Orthogonal indexing is also known as outer indexing. Slices with
    step > 1 are supported, but slices with negative step are not. For
    convenience, this functionality is also available via the `oindex`
    property, e.g. ``z.oindex[[1, 4], [1, 4]] = 4``.

    See Also
    --------
    get_basic_selection, set_basic_selection, get_mask_selection,
    set_mask_selection, get_coordinate_selection,
    set_coordinate_selection, get_orthogonal_selection, vindex, oindex,
    __getitem__, __setitem__
    """
    # Writes are forbidden on read-only arrays.
    if self._read_only:
        err_read_only()

    # Without metadata caching, re-read shape/chunk info so the indexer
    # operates on the current state of the array.
    if not self._cache_metadata:
        self._load_metadata_nosync()

    # Build the per-dimension indexer and hand the write off to the
    # shared selection machinery.
    self._set_selection(OrthogonalIndexer(selection, self), value,
                        fields=fields)
constant[Modify data via a selection for each dimension of the array.
Parameters
----------
selection : tuple
A selection for each dimension of the array. May be any combination of int,
slice, integer array or Boolean array.
value : scalar or array-like
Value to be stored into the array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to set
data for.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.zeros((5, 5), dtype=int)
Set data for a selection of rows::
>>> z.set_orthogonal_selection(([1, 4], slice(None)), 1)
>>> z[...]
array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
Set data for a selection of columns::
>>> z.set_orthogonal_selection((slice(None), [1, 4]), 2)
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 2, 1, 1, 2],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 2, 1, 1, 2]])
Set data for a selection of rows and columns::
>>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3)
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 3, 1, 1, 3],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 3, 1, 1, 3]])
For convenience, this functionality is also available via the `oindex` property.
E.g.::
>>> z.oindex[[1, 4], [1, 4]] = 4
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 4, 1, 1, 4],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 4, 1, 1, 4]])
Notes
-----
Orthogonal indexing is also known as outer indexing.
Slices with step > 1 are supported, but slices with negative step are not.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,
vindex, oindex, __getitem__, __setitem__
]
if name[self]._read_only begin[:]
call[name[err_read_only], parameter[]]
if <ast.UnaryOp object at 0x7da1b19d9360> begin[:]
call[name[self]._load_metadata_nosync, parameter[]]
variable[indexer] assign[=] call[name[OrthogonalIndexer], parameter[name[selection], name[self]]]
call[name[self]._set_selection, parameter[name[indexer], name[value]]] | keyword[def] identifier[set_orthogonal_selection] ( identifier[self] , identifier[selection] , identifier[value] , identifier[fields] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_read_only] :
identifier[err_read_only] ()
keyword[if] keyword[not] identifier[self] . identifier[_cache_metadata] :
identifier[self] . identifier[_load_metadata_nosync] ()
identifier[indexer] = identifier[OrthogonalIndexer] ( identifier[selection] , identifier[self] )
identifier[self] . identifier[_set_selection] ( identifier[indexer] , identifier[value] , identifier[fields] = identifier[fields] ) | def set_orthogonal_selection(self, selection, value, fields=None):
"""Modify data via a selection for each dimension of the array.
Parameters
----------
selection : tuple
A selection for each dimension of the array. May be any combination of int,
slice, integer array or Boolean array.
value : scalar or array-like
Value to be stored into the array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to set
data for.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.zeros((5, 5), dtype=int)
Set data for a selection of rows::
>>> z.set_orthogonal_selection(([1, 4], slice(None)), 1)
>>> z[...]
array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
Set data for a selection of columns::
>>> z.set_orthogonal_selection((slice(None), [1, 4]), 2)
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 2, 1, 1, 2],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 2, 1, 1, 2]])
Set data for a selection of rows and columns::
>>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3)
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 3, 1, 1, 3],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 3, 1, 1, 3]])
For convenience, this functionality is also available via the `oindex` property.
E.g.::
>>> z.oindex[[1, 4], [1, 4]] = 4
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 4, 1, 1, 4],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 4, 1, 1, 4]])
Notes
-----
Orthogonal indexing is also known as outer indexing.
Slices with step > 1 are supported, but slices with negative step are not.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,
vindex, oindex, __getitem__, __setitem__
"""
# guard conditions
if self._read_only:
err_read_only() # depends on [control=['if'], data=[]]
# refresh metadata
if not self._cache_metadata:
self._load_metadata_nosync() # depends on [control=['if'], data=[]]
# setup indexer
indexer = OrthogonalIndexer(selection, self)
self._set_selection(indexer, value, fields=fields) |
def groupby2(*args):
""" Like itertools.groupby, with the following additions:
- Supports multiple sequences. Instead of returning (k, g), each iteration
returns (k, g0, g1, ...), with one `g` for each input sequence. The value of
each `g` is either a non-empty iterator or `None`.
- It treats the value `None` as an empty sequence. So you can make subsequent
calls to groupby2 on any `g` value.
.. note:: Read up on groupby here:
https://docs.python.org/dev/library/itertools.html#itertools.groupby
:param args: (list) Parameters alternating between sorted lists and their
respective key functions. The lists should be sorted with
respect to their key function.
:returns: (tuple) A n + 1 dimensional tuple, where the first element is the
key of the iteration, and the other n entries are groups of
objects that share this key. Each group corresponds to the an
input sequence. `groupby2` is a generator that returns a tuple
for every iteration. If an input sequence has no members with
the current key, None is returned in place of a generator.
"""
generatorList = [] # list of each list's (k, group) tuples
if len(args) % 2 == 1:
raise ValueError("Must have a key function for every list.")
advanceList = []
# populate above lists
for i in xrange(0, len(args), 2):
listn = args[i]
fn = args[i + 1]
if listn is not None:
generatorList.append(groupby(listn, fn))
advanceList.append(True) # start by advancing everyone.
else:
generatorList.append(None)
advanceList.append(False)
n = len(generatorList)
nextList = [None] * n
# while all lists aren't exhausted walk through each group in order
while True:
for i in xrange(n):
if advanceList[i]:
try:
nextList[i] = generatorList[i].next()
except StopIteration:
nextList[i] = None
# no more values to process in any of the generators
if all(entry is None for entry in nextList):
break
# the minimum key value in the nextList
minKeyVal = min(nextVal[0] for nextVal in nextList
if nextVal is not None)
# populate the tuple to return based on minKeyVal
retGroups = [minKeyVal]
for i in xrange(n):
if nextList[i] is not None and nextList[i][0] == minKeyVal:
retGroups.append(nextList[i][1])
advanceList[i] = True
else:
advanceList[i] = False
retGroups.append(None)
yield tuple(retGroups) | def function[groupby2, parameter[]]:
constant[ Like itertools.groupby, with the following additions:
- Supports multiple sequences. Instead of returning (k, g), each iteration
returns (k, g0, g1, ...), with one `g` for each input sequence. The value of
each `g` is either a non-empty iterator or `None`.
- It treats the value `None` as an empty sequence. So you can make subsequent
calls to groupby2 on any `g` value.
.. note:: Read up on groupby here:
https://docs.python.org/dev/library/itertools.html#itertools.groupby
:param args: (list) Parameters alternating between sorted lists and their
respective key functions. The lists should be sorted with
respect to their key function.
:returns: (tuple) A n + 1 dimensional tuple, where the first element is the
key of the iteration, and the other n entries are groups of
objects that share this key. Each group corresponds to the an
input sequence. `groupby2` is a generator that returns a tuple
for every iteration. If an input sequence has no members with
the current key, None is returned in place of a generator.
]
variable[generatorList] assign[=] list[[]]
if compare[binary_operation[call[name[len], parameter[name[args]]] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[1]] begin[:]
<ast.Raise object at 0x7da18dc05780>
variable[advanceList] assign[=] list[[]]
for taget[name[i]] in starred[call[name[xrange], parameter[constant[0], call[name[len], parameter[name[args]]], constant[2]]]] begin[:]
variable[listn] assign[=] call[name[args]][name[i]]
variable[fn] assign[=] call[name[args]][binary_operation[name[i] + constant[1]]]
if compare[name[listn] is_not constant[None]] begin[:]
call[name[generatorList].append, parameter[call[name[groupby], parameter[name[listn], name[fn]]]]]
call[name[advanceList].append, parameter[constant[True]]]
variable[n] assign[=] call[name[len], parameter[name[generatorList]]]
variable[nextList] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18dc04790>]] * name[n]]
while constant[True] begin[:]
for taget[name[i]] in starred[call[name[xrange], parameter[name[n]]]] begin[:]
if call[name[advanceList]][name[i]] begin[:]
<ast.Try object at 0x7da18dc05360>
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da18bc703a0>]] begin[:]
break
variable[minKeyVal] assign[=] call[name[min], parameter[<ast.GeneratorExp object at 0x7da18bc73640>]]
variable[retGroups] assign[=] list[[<ast.Name object at 0x7da18bc718d0>]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[n]]]] begin[:]
if <ast.BoolOp object at 0x7da18bc72890> begin[:]
call[name[retGroups].append, parameter[call[call[name[nextList]][name[i]]][constant[1]]]]
call[name[advanceList]][name[i]] assign[=] constant[True]
<ast.Yield object at 0x7da18bc71a50> | keyword[def] identifier[groupby2] (* identifier[args] ):
literal[string]
identifier[generatorList] =[]
keyword[if] identifier[len] ( identifier[args] )% literal[int] == literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[advanceList] =[]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[args] ), literal[int] ):
identifier[listn] = identifier[args] [ identifier[i] ]
identifier[fn] = identifier[args] [ identifier[i] + literal[int] ]
keyword[if] identifier[listn] keyword[is] keyword[not] keyword[None] :
identifier[generatorList] . identifier[append] ( identifier[groupby] ( identifier[listn] , identifier[fn] ))
identifier[advanceList] . identifier[append] ( keyword[True] )
keyword[else] :
identifier[generatorList] . identifier[append] ( keyword[None] )
identifier[advanceList] . identifier[append] ( keyword[False] )
identifier[n] = identifier[len] ( identifier[generatorList] )
identifier[nextList] =[ keyword[None] ]* identifier[n]
keyword[while] keyword[True] :
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[n] ):
keyword[if] identifier[advanceList] [ identifier[i] ]:
keyword[try] :
identifier[nextList] [ identifier[i] ]= identifier[generatorList] [ identifier[i] ]. identifier[next] ()
keyword[except] identifier[StopIteration] :
identifier[nextList] [ identifier[i] ]= keyword[None]
keyword[if] identifier[all] ( identifier[entry] keyword[is] keyword[None] keyword[for] identifier[entry] keyword[in] identifier[nextList] ):
keyword[break]
identifier[minKeyVal] = identifier[min] ( identifier[nextVal] [ literal[int] ] keyword[for] identifier[nextVal] keyword[in] identifier[nextList]
keyword[if] identifier[nextVal] keyword[is] keyword[not] keyword[None] )
identifier[retGroups] =[ identifier[minKeyVal] ]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[n] ):
keyword[if] identifier[nextList] [ identifier[i] ] keyword[is] keyword[not] keyword[None] keyword[and] identifier[nextList] [ identifier[i] ][ literal[int] ]== identifier[minKeyVal] :
identifier[retGroups] . identifier[append] ( identifier[nextList] [ identifier[i] ][ literal[int] ])
identifier[advanceList] [ identifier[i] ]= keyword[True]
keyword[else] :
identifier[advanceList] [ identifier[i] ]= keyword[False]
identifier[retGroups] . identifier[append] ( keyword[None] )
keyword[yield] identifier[tuple] ( identifier[retGroups] ) | def groupby2(*args):
""" Like itertools.groupby, with the following additions:
- Supports multiple sequences. Instead of returning (k, g), each iteration
returns (k, g0, g1, ...), with one `g` for each input sequence. The value of
each `g` is either a non-empty iterator or `None`.
- It treats the value `None` as an empty sequence. So you can make subsequent
calls to groupby2 on any `g` value.
.. note:: Read up on groupby here:
https://docs.python.org/dev/library/itertools.html#itertools.groupby
:param args: (list) Parameters alternating between sorted lists and their
respective key functions. The lists should be sorted with
respect to their key function.
:returns: (tuple) A n + 1 dimensional tuple, where the first element is the
key of the iteration, and the other n entries are groups of
objects that share this key. Each group corresponds to the an
input sequence. `groupby2` is a generator that returns a tuple
for every iteration. If an input sequence has no members with
the current key, None is returned in place of a generator.
"""
generatorList = [] # list of each list's (k, group) tuples
if len(args) % 2 == 1:
raise ValueError('Must have a key function for every list.') # depends on [control=['if'], data=[]]
advanceList = []
# populate above lists
for i in xrange(0, len(args), 2):
listn = args[i]
fn = args[i + 1]
if listn is not None:
generatorList.append(groupby(listn, fn))
advanceList.append(True) # start by advancing everyone. # depends on [control=['if'], data=['listn']]
else:
generatorList.append(None)
advanceList.append(False) # depends on [control=['for'], data=['i']]
n = len(generatorList)
nextList = [None] * n
# while all lists aren't exhausted walk through each group in order
while True:
for i in xrange(n):
if advanceList[i]:
try:
nextList[i] = generatorList[i].next() # depends on [control=['try'], data=[]]
except StopIteration:
nextList[i] = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# no more values to process in any of the generators
if all((entry is None for entry in nextList)):
break # depends on [control=['if'], data=[]]
# the minimum key value in the nextList
minKeyVal = min((nextVal[0] for nextVal in nextList if nextVal is not None))
# populate the tuple to return based on minKeyVal
retGroups = [minKeyVal]
for i in xrange(n):
if nextList[i] is not None and nextList[i][0] == minKeyVal:
retGroups.append(nextList[i][1])
advanceList[i] = True # depends on [control=['if'], data=[]]
else:
advanceList[i] = False
retGroups.append(None) # depends on [control=['for'], data=['i']]
yield tuple(retGroups) # depends on [control=['while'], data=[]] |
def bits_clear_in_range( bits, range_start, range_end ):
"""
Yield start,end tuples for each span of clear bits in [range_start,range_end)
"""
end = range_start
while 1:
start = bits.next_clear( end )
if start >= range_end: break
end = min( bits.next_set( start ), range_end )
yield start, end | def function[bits_clear_in_range, parameter[bits, range_start, range_end]]:
constant[
Yield start,end tuples for each span of clear bits in [range_start,range_end)
]
variable[end] assign[=] name[range_start]
while constant[1] begin[:]
variable[start] assign[=] call[name[bits].next_clear, parameter[name[end]]]
if compare[name[start] greater_or_equal[>=] name[range_end]] begin[:]
break
variable[end] assign[=] call[name[min], parameter[call[name[bits].next_set, parameter[name[start]]], name[range_end]]]
<ast.Yield object at 0x7da1b0d6b640> | keyword[def] identifier[bits_clear_in_range] ( identifier[bits] , identifier[range_start] , identifier[range_end] ):
literal[string]
identifier[end] = identifier[range_start]
keyword[while] literal[int] :
identifier[start] = identifier[bits] . identifier[next_clear] ( identifier[end] )
keyword[if] identifier[start] >= identifier[range_end] : keyword[break]
identifier[end] = identifier[min] ( identifier[bits] . identifier[next_set] ( identifier[start] ), identifier[range_end] )
keyword[yield] identifier[start] , identifier[end] | def bits_clear_in_range(bits, range_start, range_end):
"""
Yield start,end tuples for each span of clear bits in [range_start,range_end)
"""
end = range_start
while 1:
start = bits.next_clear(end)
if start >= range_end:
break # depends on [control=['if'], data=[]]
end = min(bits.next_set(start), range_end)
yield (start, end) # depends on [control=['while'], data=[]] |
def merge(self, other):
'''
Returns a *copy* of this phoneme, with the features of other merged into this feature bundle.
Other can be a list of phonemes, in which case the list is returned (for technical reasons).
Other may also be a single feature value or a list of feature values.
'''
phoneme = deepcopy(self)
# special case for list of phonemes
if isinstance(other, list) and len(other) > 0 and isinstance(other[0], AbstractPhoneme):
return other
if isinstance(other, AbstractPhoneme):
feature_values = other.features.values()
elif type(other) != list and type(other) != tuple:
feature_values = [other]
else:
feature_values = other
for f in feature_values:
if type(f) == list:
for inner_f in f:
phoneme[type(inner_f)] = inner_f
elif isinstance(f, AbstractPhoneme):
phoneme = phoneme << f
else:
phoneme[type(f)] = f
if isinstance(other, AbstractPhoneme) and other.ipa is not None:
phoneme.ipa = other.ipa
return phoneme | def function[merge, parameter[self, other]]:
constant[
Returns a *copy* of this phoneme, with the features of other merged into this feature bundle.
Other can be a list of phonemes, in which case the list is returned (for technical reasons).
Other may also be a single feature value or a list of feature values.
]
variable[phoneme] assign[=] call[name[deepcopy], parameter[name[self]]]
if <ast.BoolOp object at 0x7da1b26aef50> begin[:]
return[name[other]]
if call[name[isinstance], parameter[name[other], name[AbstractPhoneme]]] begin[:]
variable[feature_values] assign[=] call[name[other].features.values, parameter[]]
for taget[name[f]] in starred[name[feature_values]] begin[:]
if compare[call[name[type], parameter[name[f]]] equal[==] name[list]] begin[:]
for taget[name[inner_f]] in starred[name[f]] begin[:]
call[name[phoneme]][call[name[type], parameter[name[inner_f]]]] assign[=] name[inner_f]
if <ast.BoolOp object at 0x7da2046232e0> begin[:]
name[phoneme].ipa assign[=] name[other].ipa
return[name[phoneme]] | keyword[def] identifier[merge] ( identifier[self] , identifier[other] ):
literal[string]
identifier[phoneme] = identifier[deepcopy] ( identifier[self] )
keyword[if] identifier[isinstance] ( identifier[other] , identifier[list] ) keyword[and] identifier[len] ( identifier[other] )> literal[int] keyword[and] identifier[isinstance] ( identifier[other] [ literal[int] ], identifier[AbstractPhoneme] ):
keyword[return] identifier[other]
keyword[if] identifier[isinstance] ( identifier[other] , identifier[AbstractPhoneme] ):
identifier[feature_values] = identifier[other] . identifier[features] . identifier[values] ()
keyword[elif] identifier[type] ( identifier[other] )!= identifier[list] keyword[and] identifier[type] ( identifier[other] )!= identifier[tuple] :
identifier[feature_values] =[ identifier[other] ]
keyword[else] :
identifier[feature_values] = identifier[other]
keyword[for] identifier[f] keyword[in] identifier[feature_values] :
keyword[if] identifier[type] ( identifier[f] )== identifier[list] :
keyword[for] identifier[inner_f] keyword[in] identifier[f] :
identifier[phoneme] [ identifier[type] ( identifier[inner_f] )]= identifier[inner_f]
keyword[elif] identifier[isinstance] ( identifier[f] , identifier[AbstractPhoneme] ):
identifier[phoneme] = identifier[phoneme] << identifier[f]
keyword[else] :
identifier[phoneme] [ identifier[type] ( identifier[f] )]= identifier[f]
keyword[if] identifier[isinstance] ( identifier[other] , identifier[AbstractPhoneme] ) keyword[and] identifier[other] . identifier[ipa] keyword[is] keyword[not] keyword[None] :
identifier[phoneme] . identifier[ipa] = identifier[other] . identifier[ipa]
keyword[return] identifier[phoneme] | def merge(self, other):
"""
Returns a *copy* of this phoneme, with the features of other merged into this feature bundle.
Other can be a list of phonemes, in which case the list is returned (for technical reasons).
Other may also be a single feature value or a list of feature values.
"""
phoneme = deepcopy(self) # special case for list of phonemes
if isinstance(other, list) and len(other) > 0 and isinstance(other[0], AbstractPhoneme):
return other # depends on [control=['if'], data=[]]
if isinstance(other, AbstractPhoneme):
feature_values = other.features.values() # depends on [control=['if'], data=[]]
elif type(other) != list and type(other) != tuple:
feature_values = [other] # depends on [control=['if'], data=[]]
else:
feature_values = other
for f in feature_values:
if type(f) == list:
for inner_f in f:
phoneme[type(inner_f)] = inner_f # depends on [control=['for'], data=['inner_f']] # depends on [control=['if'], data=[]]
elif isinstance(f, AbstractPhoneme):
phoneme = phoneme << f # depends on [control=['if'], data=[]]
else:
phoneme[type(f)] = f # depends on [control=['for'], data=['f']]
if isinstance(other, AbstractPhoneme) and other.ipa is not None:
phoneme.ipa = other.ipa # depends on [control=['if'], data=[]]
return phoneme |
def __should_warn_on_redef(
ctx: GeneratorContext, defsym: sym.Symbol, safe_name: str, def_meta: lmap.Map
) -> bool:
"""Return True if the compiler should emit a warning about this name being redefined."""
no_warn_on_redef = def_meta.entry(SYM_NO_WARN_ON_REDEF_META_KEY, False)
if no_warn_on_redef:
return False
elif safe_name in ctx.current_ns.module.__dict__:
return True
elif defsym in ctx.current_ns.interns:
var = ctx.current_ns.find(defsym)
assert var is not None, f"Var {defsym} cannot be none here"
if var.meta is not None and var.meta.entry(SYM_REDEF_META_KEY):
return False
elif var.is_bound:
return True
else:
return False
else:
return False | def function[__should_warn_on_redef, parameter[ctx, defsym, safe_name, def_meta]]:
constant[Return True if the compiler should emit a warning about this name being redefined.]
variable[no_warn_on_redef] assign[=] call[name[def_meta].entry, parameter[name[SYM_NO_WARN_ON_REDEF_META_KEY], constant[False]]]
if name[no_warn_on_redef] begin[:]
return[constant[False]] | keyword[def] identifier[__should_warn_on_redef] (
identifier[ctx] : identifier[GeneratorContext] , identifier[defsym] : identifier[sym] . identifier[Symbol] , identifier[safe_name] : identifier[str] , identifier[def_meta] : identifier[lmap] . identifier[Map]
)-> identifier[bool] :
literal[string]
identifier[no_warn_on_redef] = identifier[def_meta] . identifier[entry] ( identifier[SYM_NO_WARN_ON_REDEF_META_KEY] , keyword[False] )
keyword[if] identifier[no_warn_on_redef] :
keyword[return] keyword[False]
keyword[elif] identifier[safe_name] keyword[in] identifier[ctx] . identifier[current_ns] . identifier[module] . identifier[__dict__] :
keyword[return] keyword[True]
keyword[elif] identifier[defsym] keyword[in] identifier[ctx] . identifier[current_ns] . identifier[interns] :
identifier[var] = identifier[ctx] . identifier[current_ns] . identifier[find] ( identifier[defsym] )
keyword[assert] identifier[var] keyword[is] keyword[not] keyword[None] , literal[string]
keyword[if] identifier[var] . identifier[meta] keyword[is] keyword[not] keyword[None] keyword[and] identifier[var] . identifier[meta] . identifier[entry] ( identifier[SYM_REDEF_META_KEY] ):
keyword[return] keyword[False]
keyword[elif] identifier[var] . identifier[is_bound] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[False] | def __should_warn_on_redef(ctx: GeneratorContext, defsym: sym.Symbol, safe_name: str, def_meta: lmap.Map) -> bool:
"""Return True if the compiler should emit a warning about this name being redefined."""
no_warn_on_redef = def_meta.entry(SYM_NO_WARN_ON_REDEF_META_KEY, False)
if no_warn_on_redef:
return False # depends on [control=['if'], data=[]]
elif safe_name in ctx.current_ns.module.__dict__:
return True # depends on [control=['if'], data=[]]
elif defsym in ctx.current_ns.interns:
var = ctx.current_ns.find(defsym)
assert var is not None, f'Var {defsym} cannot be none here'
if var.meta is not None and var.meta.entry(SYM_REDEF_META_KEY):
return False # depends on [control=['if'], data=[]]
elif var.is_bound:
return True # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=['defsym']]
else:
return False |
def _bottom(self):
"""Index of row following last row of range"""
_, top, _, height = self._extents
return top + height | def function[_bottom, parameter[self]]:
constant[Index of row following last row of range]
<ast.Tuple object at 0x7da20c6a9390> assign[=] name[self]._extents
return[binary_operation[name[top] + name[height]]] | keyword[def] identifier[_bottom] ( identifier[self] ):
literal[string]
identifier[_] , identifier[top] , identifier[_] , identifier[height] = identifier[self] . identifier[_extents]
keyword[return] identifier[top] + identifier[height] | def _bottom(self):
"""Index of row following last row of range"""
(_, top, _, height) = self._extents
return top + height |
def sent_splitter_ja(text, delimiters=set(u'。.?!\n\r'),
parenthesis=u'()「」『』“”'):
'''
Args:
text: unicode string that contains multiple Japanese sentences.
delimiters: set() of sentence delimiter characters.
parenthesis: to be checked its correspondence.
Returns:
generator that yields sentences.
'''
paren_chars = set(parenthesis)
close2open = dict(zip(parenthesis[1::2], parenthesis[0::2]))
pstack = []
buff = []
for i, c in enumerate(text):
c_next = text[i+1] if i+1 < len(text) else None
# check correspondence of parenthesis
if c in paren_chars:
if c in close2open: # close
if len(pstack) > 0 and pstack[-1] == close2open[c]:
pstack.pop()
else: # open
pstack.append(c)
buff.append(c)
if c in delimiters:
if len(pstack) == 0 and c_next not in delimiters:
yield ''.join(buff)
buff = []
if len(buff) > 0:
yield ''.join(buff) | def function[sent_splitter_ja, parameter[text, delimiters, parenthesis]]:
constant[
Args:
text: unicode string that contains multiple Japanese sentences.
delimiters: set() of sentence delimiter characters.
parenthesis: to be checked its correspondence.
Returns:
generator that yields sentences.
]
variable[paren_chars] assign[=] call[name[set], parameter[name[parenthesis]]]
variable[close2open] assign[=] call[name[dict], parameter[call[name[zip], parameter[call[name[parenthesis]][<ast.Slice object at 0x7da204346500>], call[name[parenthesis]][<ast.Slice object at 0x7da204346020>]]]]]
variable[pstack] assign[=] list[[]]
variable[buff] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da204346350>, <ast.Name object at 0x7da2043471c0>]]] in starred[call[name[enumerate], parameter[name[text]]]] begin[:]
variable[c_next] assign[=] <ast.IfExp object at 0x7da204345210>
if compare[name[c] in name[paren_chars]] begin[:]
if compare[name[c] in name[close2open]] begin[:]
if <ast.BoolOp object at 0x7da204344190> begin[:]
call[name[pstack].pop, parameter[]]
call[name[buff].append, parameter[name[c]]]
if compare[name[c] in name[delimiters]] begin[:]
if <ast.BoolOp object at 0x7da204347b20> begin[:]
<ast.Yield object at 0x7da2043464d0>
variable[buff] assign[=] list[[]]
if compare[call[name[len], parameter[name[buff]]] greater[>] constant[0]] begin[:]
<ast.Yield object at 0x7da204347f40> | keyword[def] identifier[sent_splitter_ja] ( identifier[text] , identifier[delimiters] = identifier[set] ( literal[string] ),
identifier[parenthesis] = literal[string] ):
literal[string]
identifier[paren_chars] = identifier[set] ( identifier[parenthesis] )
identifier[close2open] = identifier[dict] ( identifier[zip] ( identifier[parenthesis] [ literal[int] :: literal[int] ], identifier[parenthesis] [ literal[int] :: literal[int] ]))
identifier[pstack] =[]
identifier[buff] =[]
keyword[for] identifier[i] , identifier[c] keyword[in] identifier[enumerate] ( identifier[text] ):
identifier[c_next] = identifier[text] [ identifier[i] + literal[int] ] keyword[if] identifier[i] + literal[int] < identifier[len] ( identifier[text] ) keyword[else] keyword[None]
keyword[if] identifier[c] keyword[in] identifier[paren_chars] :
keyword[if] identifier[c] keyword[in] identifier[close2open] :
keyword[if] identifier[len] ( identifier[pstack] )> literal[int] keyword[and] identifier[pstack] [- literal[int] ]== identifier[close2open] [ identifier[c] ]:
identifier[pstack] . identifier[pop] ()
keyword[else] :
identifier[pstack] . identifier[append] ( identifier[c] )
identifier[buff] . identifier[append] ( identifier[c] )
keyword[if] identifier[c] keyword[in] identifier[delimiters] :
keyword[if] identifier[len] ( identifier[pstack] )== literal[int] keyword[and] identifier[c_next] keyword[not] keyword[in] identifier[delimiters] :
keyword[yield] literal[string] . identifier[join] ( identifier[buff] )
identifier[buff] =[]
keyword[if] identifier[len] ( identifier[buff] )> literal[int] :
keyword[yield] literal[string] . identifier[join] ( identifier[buff] ) | def sent_splitter_ja(text, delimiters=set(u'。.?!\n\r'), parenthesis=u'()「」『』“”'):
"""
Args:
text: unicode string that contains multiple Japanese sentences.
delimiters: set() of sentence delimiter characters.
parenthesis: to be checked its correspondence.
Returns:
generator that yields sentences.
"""
paren_chars = set(parenthesis)
close2open = dict(zip(parenthesis[1::2], parenthesis[0::2]))
pstack = []
buff = []
for (i, c) in enumerate(text):
c_next = text[i + 1] if i + 1 < len(text) else None
# check correspondence of parenthesis
if c in paren_chars:
if c in close2open: # close
if len(pstack) > 0 and pstack[-1] == close2open[c]:
pstack.pop() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['c', 'close2open']]
else: # open
pstack.append(c) # depends on [control=['if'], data=['c']]
buff.append(c)
if c in delimiters:
if len(pstack) == 0 and c_next not in delimiters:
yield ''.join(buff)
buff = [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['delimiters']] # depends on [control=['for'], data=[]]
if len(buff) > 0:
yield ''.join(buff) # depends on [control=['if'], data=[]] |
def try_number(value):
"""
Attempt to cast the string `value` to an int, and failing that, a float,
failing that, raise a ValueError.
"""
for cast_function in [int, float]:
try:
return cast_function(value)
except ValueError:
pass
raise ValueError("Unable to use value as int or float: {0!r}"
.format(value)) | def function[try_number, parameter[value]]:
constant[
Attempt to cast the string `value` to an int, and failing that, a float,
failing that, raise a ValueError.
]
for taget[name[cast_function]] in starred[list[[<ast.Name object at 0x7da1b2407610>, <ast.Name object at 0x7da1b2407190>]]] begin[:]
<ast.Try object at 0x7da1b24044c0>
<ast.Raise object at 0x7da1b2404610> | keyword[def] identifier[try_number] ( identifier[value] ):
literal[string]
keyword[for] identifier[cast_function] keyword[in] [ identifier[int] , identifier[float] ]:
keyword[try] :
keyword[return] identifier[cast_function] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[raise] identifier[ValueError] ( literal[string]
. identifier[format] ( identifier[value] )) | def try_number(value):
"""
Attempt to cast the string `value` to an int, and failing that, a float,
failing that, raise a ValueError.
"""
for cast_function in [int, float]:
try:
return cast_function(value) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['cast_function']]
raise ValueError('Unable to use value as int or float: {0!r}'.format(value)) |
def _initial_broks(self, broker_name):
"""Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This do not send broks, it only makes scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None
"""
with self.app.conf_lock:
logger.info("A new broker just connected : %s", broker_name)
return self.app.sched.fill_initial_broks(broker_name) | def function[_initial_broks, parameter[self, broker_name]]:
constant[Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This do not send broks, it only makes scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None
]
with name[self].app.conf_lock begin[:]
call[name[logger].info, parameter[constant[A new broker just connected : %s], name[broker_name]]]
return[call[name[self].app.sched.fill_initial_broks, parameter[name[broker_name]]]] | keyword[def] identifier[_initial_broks] ( identifier[self] , identifier[broker_name] ):
literal[string]
keyword[with] identifier[self] . identifier[app] . identifier[conf_lock] :
identifier[logger] . identifier[info] ( literal[string] , identifier[broker_name] )
keyword[return] identifier[self] . identifier[app] . identifier[sched] . identifier[fill_initial_broks] ( identifier[broker_name] ) | def _initial_broks(self, broker_name):
"""Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This do not send broks, it only makes scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None
"""
with self.app.conf_lock:
logger.info('A new broker just connected : %s', broker_name)
return self.app.sched.fill_initial_broks(broker_name) # depends on [control=['with'], data=[]] |
def attach_schema(self, schem):
"""Add a tuple schema to this object (externally imposed)"""
self.tuple_schema = schema.AndSchema.make(self.tuple_schema, schem) | def function[attach_schema, parameter[self, schem]]:
constant[Add a tuple schema to this object (externally imposed)]
name[self].tuple_schema assign[=] call[name[schema].AndSchema.make, parameter[name[self].tuple_schema, name[schem]]] | keyword[def] identifier[attach_schema] ( identifier[self] , identifier[schem] ):
literal[string]
identifier[self] . identifier[tuple_schema] = identifier[schema] . identifier[AndSchema] . identifier[make] ( identifier[self] . identifier[tuple_schema] , identifier[schem] ) | def attach_schema(self, schem):
"""Add a tuple schema to this object (externally imposed)"""
self.tuple_schema = schema.AndSchema.make(self.tuple_schema, schem) |
def convert(dbus_obj):
    """Converts dbus_obj from dbus type to python type.
    :param dbus_obj: dbus object.
    :returns: dbus_obj in python type.
    """
    str_types = (dbus.ObjectPath, dbus.Signature, dbus.String)
    if not PY3:
        str_types += (dbus.UTF8String,)
    # Ordered table of (dbus types, converter) pairs; first match wins.
    conversion_table = (
        ((dbus.Byte, dbus.Int16, dbus.Int32, dbus.Int64,
          dbus.UInt16, dbus.UInt32, dbus.UInt64), int),
        ((dbus.Boolean,), bool),
        ((dbus.Double,), float),
        ((dbus.Array,), lambda obj: list(map(convert, obj))),
        ((dbus.Struct,), lambda obj: tuple(map(convert, obj))),
        (str_types, str if PY3 else unicode),
        ((dbus.Dictionary,),
         lambda obj: dict(zip(map(convert, obj.keys()),
                              map(convert, obj.values())))),
    )
    for dbus_types, to_python in conversion_table:
        if isinstance(dbus_obj, dbus_types):
            return to_python(dbus_obj)
    # Unknown dbus type: hand it back unchanged.
    return dbus_obj
constant[Converts dbus_obj from dbus type to python type.
:param dbus_obj: dbus object.
:returns: dbus_obj in python type.
]
variable[_isinstance] assign[=] call[name[partial], parameter[name[isinstance], name[dbus_obj]]]
variable[ConvertType] assign[=] call[name[namedtuple], parameter[constant[ConvertType], constant[pytype dbustypes]]]
variable[pyint] assign[=] call[name[ConvertType], parameter[name[int], tuple[[<ast.Attribute object at 0x7da18f09ec80>, <ast.Attribute object at 0x7da18f09fac0>, <ast.Attribute object at 0x7da18f09e740>, <ast.Attribute object at 0x7da18f09e590>, <ast.Attribute object at 0x7da18f09ee30>, <ast.Attribute object at 0x7da18f09fb20>, <ast.Attribute object at 0x7da18f09f3d0>]]]]
variable[pybool] assign[=] call[name[ConvertType], parameter[name[bool], tuple[[<ast.Attribute object at 0x7da18f09f5e0>]]]]
variable[pyfloat] assign[=] call[name[ConvertType], parameter[name[float], tuple[[<ast.Attribute object at 0x7da18f09db40>]]]]
variable[pylist] assign[=] call[name[ConvertType], parameter[<ast.Lambda object at 0x7da18f09f5b0>, tuple[[<ast.Attribute object at 0x7da18f09d0c0>]]]]
variable[pytuple] assign[=] call[name[ConvertType], parameter[<ast.Lambda object at 0x7da18f09d840>, tuple[[<ast.Attribute object at 0x7da18f09ff70>]]]]
variable[types_str] assign[=] tuple[[<ast.Attribute object at 0x7da18f09da80>, <ast.Attribute object at 0x7da18f09d600>, <ast.Attribute object at 0x7da18f09d2d0>]]
if <ast.UnaryOp object at 0x7da18f09df90> begin[:]
<ast.AugAssign object at 0x7da18f09c430>
variable[pystr] assign[=] call[name[ConvertType], parameter[<ast.IfExp object at 0x7da18f09e080>, name[types_str]]]
variable[pydict] assign[=] call[name[ConvertType], parameter[<ast.Lambda object at 0x7da18f09cc40>, tuple[[<ast.Attribute object at 0x7da18f09ded0>]]]]
for taget[name[conv]] in starred[tuple[[<ast.Name object at 0x7da18f09e470>, <ast.Name object at 0x7da18f09e5f0>, <ast.Name object at 0x7da18dc04e80>, <ast.Name object at 0x7da18dc06f20>, <ast.Name object at 0x7da18dc072b0>, <ast.Name object at 0x7da18dc06200>, <ast.Name object at 0x7da18dc04760>]]] begin[:]
if call[name[any], parameter[call[name[map], parameter[name[_isinstance], name[conv].dbustypes]]]] begin[:]
return[call[name[conv].pytype, parameter[name[dbus_obj]]]] | keyword[def] identifier[convert] ( identifier[dbus_obj] ):
literal[string]
identifier[_isinstance] = identifier[partial] ( identifier[isinstance] , identifier[dbus_obj] )
identifier[ConvertType] = identifier[namedtuple] ( literal[string] , literal[string] )
identifier[pyint] = identifier[ConvertType] ( identifier[int] ,( identifier[dbus] . identifier[Byte] , identifier[dbus] . identifier[Int16] , identifier[dbus] . identifier[Int32] , identifier[dbus] . identifier[Int64] ,
identifier[dbus] . identifier[UInt16] , identifier[dbus] . identifier[UInt32] , identifier[dbus] . identifier[UInt64] ))
identifier[pybool] = identifier[ConvertType] ( identifier[bool] ,( identifier[dbus] . identifier[Boolean] ,))
identifier[pyfloat] = identifier[ConvertType] ( identifier[float] ,( identifier[dbus] . identifier[Double] ,))
identifier[pylist] = identifier[ConvertType] ( keyword[lambda] identifier[_obj] : identifier[list] ( identifier[map] ( identifier[convert] , identifier[dbus_obj] )),
( identifier[dbus] . identifier[Array] ,))
identifier[pytuple] = identifier[ConvertType] ( keyword[lambda] identifier[_obj] : identifier[tuple] ( identifier[map] ( identifier[convert] , identifier[dbus_obj] )),
( identifier[dbus] . identifier[Struct] ,))
identifier[types_str] =( identifier[dbus] . identifier[ObjectPath] , identifier[dbus] . identifier[Signature] , identifier[dbus] . identifier[String] )
keyword[if] keyword[not] identifier[PY3] :
identifier[types_str] +=( identifier[dbus] . identifier[UTF8String] ,)
identifier[pystr] = identifier[ConvertType] ( identifier[str] keyword[if] identifier[PY3] keyword[else] identifier[unicode] , identifier[types_str] )
identifier[pydict] = identifier[ConvertType] (
keyword[lambda] identifier[_obj] : identifier[dict] ( identifier[zip] ( identifier[map] ( identifier[convert] , identifier[dbus_obj] . identifier[keys] ()),
identifier[map] ( identifier[convert] , identifier[dbus_obj] . identifier[values] ())
)
),
( identifier[dbus] . identifier[Dictionary] ,)
)
keyword[for] identifier[conv] keyword[in] ( identifier[pyint] , identifier[pybool] , identifier[pyfloat] , identifier[pylist] , identifier[pytuple] , identifier[pystr] , identifier[pydict] ):
keyword[if] identifier[any] ( identifier[map] ( identifier[_isinstance] , identifier[conv] . identifier[dbustypes] )):
keyword[return] identifier[conv] . identifier[pytype] ( identifier[dbus_obj] )
keyword[else] :
keyword[return] identifier[dbus_obj] | def convert(dbus_obj):
"""Converts dbus_obj from dbus type to python type.
:param dbus_obj: dbus object.
:returns: dbus_obj in python type.
"""
_isinstance = partial(isinstance, dbus_obj)
ConvertType = namedtuple('ConvertType', 'pytype dbustypes')
pyint = ConvertType(int, (dbus.Byte, dbus.Int16, dbus.Int32, dbus.Int64, dbus.UInt16, dbus.UInt32, dbus.UInt64))
pybool = ConvertType(bool, (dbus.Boolean,))
pyfloat = ConvertType(float, (dbus.Double,))
pylist = ConvertType(lambda _obj: list(map(convert, dbus_obj)), (dbus.Array,))
pytuple = ConvertType(lambda _obj: tuple(map(convert, dbus_obj)), (dbus.Struct,))
types_str = (dbus.ObjectPath, dbus.Signature, dbus.String)
if not PY3:
types_str += (dbus.UTF8String,) # depends on [control=['if'], data=[]]
pystr = ConvertType(str if PY3 else unicode, types_str)
pydict = ConvertType(lambda _obj: dict(zip(map(convert, dbus_obj.keys()), map(convert, dbus_obj.values()))), (dbus.Dictionary,))
for conv in (pyint, pybool, pyfloat, pylist, pytuple, pystr, pydict):
if any(map(_isinstance, conv.dbustypes)):
return conv.pytype(dbus_obj) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['conv']]
else:
return dbus_obj |
def send_direct_message(self, text, user=None, delegate=None, screen_name=None, user_id=None, params=None):
        """Send a direct message.

        :param text: Body of the direct message.
        :param user: Recipient (legacy ``user`` request parameter).
        :param delegate: Delegate object handed to the XML parser.
        :param screen_name: Recipient screen name.
        :param user_id: Recipient numeric user id.
        :param params: Optional extra request parameters; the mapping is
            copied, never mutated.
        :return: Result of posting to ``/direct_messages/new.xml``, as
            returned by the backend POST helper.
        """
        # ``params`` defaults to ``None`` instead of a shared mutable ``{}``
        # (the classic mutable-default pitfall).  An explicit ``{}`` passed by
        # callers behaves exactly as before, and ``None`` is now accepted too.
        params = dict(params) if params else {}
        if user is not None:
            params['user'] = user
        if user_id is not None:
            params['user_id'] = user_id
        if screen_name is not None:
            params['screen_name'] = screen_name
        params['text'] = text
        parser = txml.Direct(delegate)
        return self.__postPage('/direct_messages/new.xml', parser, params)
constant[Send a direct message
]
variable[params] assign[=] call[name[params].copy, parameter[]]
if compare[name[user] is_not constant[None]] begin[:]
call[name[params]][constant[user]] assign[=] name[user]
if compare[name[user_id] is_not constant[None]] begin[:]
call[name[params]][constant[user_id]] assign[=] name[user_id]
if compare[name[screen_name] is_not constant[None]] begin[:]
call[name[params]][constant[screen_name]] assign[=] name[screen_name]
call[name[params]][constant[text]] assign[=] name[text]
variable[parser] assign[=] call[name[txml].Direct, parameter[name[delegate]]]
return[call[name[self].__postPage, parameter[constant[/direct_messages/new.xml], name[parser], name[params]]]] | keyword[def] identifier[send_direct_message] ( identifier[self] , identifier[text] , identifier[user] = keyword[None] , identifier[delegate] = keyword[None] , identifier[screen_name] = keyword[None] , identifier[user_id] = keyword[None] , identifier[params] ={}):
literal[string]
identifier[params] = identifier[params] . identifier[copy] ()
keyword[if] identifier[user] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[user]
keyword[if] identifier[user_id] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[user_id]
keyword[if] identifier[screen_name] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[screen_name]
identifier[params] [ literal[string] ]= identifier[text]
identifier[parser] = identifier[txml] . identifier[Direct] ( identifier[delegate] )
keyword[return] identifier[self] . identifier[__postPage] ( literal[string] , identifier[parser] , identifier[params] ) | def send_direct_message(self, text, user=None, delegate=None, screen_name=None, user_id=None, params={}):
"""Send a direct message
"""
params = params.copy()
if user is not None:
params['user'] = user # depends on [control=['if'], data=['user']]
if user_id is not None:
params['user_id'] = user_id # depends on [control=['if'], data=['user_id']]
if screen_name is not None:
params['screen_name'] = screen_name # depends on [control=['if'], data=['screen_name']]
params['text'] = text
parser = txml.Direct(delegate)
return self.__postPage('/direct_messages/new.xml', parser, params) |
def handle_tick(self):
        """Internal callback every time 1 second has passed."""
        self.uptime += 1
        for stream_name, period in self.ticks.items():
            # A period of 0 means the tick stream is disabled.
            if not period:
                continue
            count = self.tick_counters[stream_name] + 1
            if count == period:
                # Period elapsed: feed the current uptime into the graph
                # and start counting again from zero.
                self.graph_input(self.TICK_STREAMS[stream_name], self.uptime)
                count = 0
            self.tick_counters[stream_name] = count
constant[Internal callback every time 1 second has passed.]
<ast.AugAssign object at 0x7da20e9b2ef0>
for taget[tuple[[<ast.Name object at 0x7da20e9b22f0>, <ast.Name object at 0x7da20e9b2320>]]] in starred[call[name[self].ticks.items, parameter[]]] begin[:]
if compare[name[interval] equal[==] constant[0]] begin[:]
continue
<ast.AugAssign object at 0x7da20e9b1000>
if compare[call[name[self].tick_counters][name[name]] equal[==] name[interval]] begin[:]
call[name[self].graph_input, parameter[call[name[self].TICK_STREAMS][name[name]], name[self].uptime]]
call[name[self].tick_counters][name[name]] assign[=] constant[0] | keyword[def] identifier[handle_tick] ( identifier[self] ):
literal[string]
identifier[self] . identifier[uptime] += literal[int]
keyword[for] identifier[name] , identifier[interval] keyword[in] identifier[self] . identifier[ticks] . identifier[items] ():
keyword[if] identifier[interval] == literal[int] :
keyword[continue]
identifier[self] . identifier[tick_counters] [ identifier[name] ]+= literal[int]
keyword[if] identifier[self] . identifier[tick_counters] [ identifier[name] ]== identifier[interval] :
identifier[self] . identifier[graph_input] ( identifier[self] . identifier[TICK_STREAMS] [ identifier[name] ], identifier[self] . identifier[uptime] )
identifier[self] . identifier[tick_counters] [ identifier[name] ]= literal[int] | def handle_tick(self):
"""Internal callback every time 1 second has passed."""
self.uptime += 1
for (name, interval) in self.ticks.items():
if interval == 0:
continue # depends on [control=['if'], data=[]]
self.tick_counters[name] += 1
if self.tick_counters[name] == interval:
self.graph_input(self.TICK_STREAMS[name], self.uptime)
self.tick_counters[name] = 0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def serialize(self):
        """Returns serialized chunk data in dictionary."""
        data = dict(
            word=self.word,
            pos=self.pos,
            label=self.label,
            dependency=self.dependency,
        )
        # has_cjk is computed rather than stored, hence the method call.
        data['has_cjk'] = self.has_cjk()
        return data
constant[Returns serialized chunk data in dictionary.]
return[dictionary[[<ast.Constant object at 0x7da1b12f2230>, <ast.Constant object at 0x7da1b12f2470>, <ast.Constant object at 0x7da1b12f0ca0>, <ast.Constant object at 0x7da1b12f1330>, <ast.Constant object at 0x7da1b12f2b00>], [<ast.Attribute object at 0x7da1b12f11b0>, <ast.Attribute object at 0x7da1b12f36a0>, <ast.Attribute object at 0x7da1b12f1810>, <ast.Attribute object at 0x7da1b12f3d00>, <ast.Call object at 0x7da1b12f37f0>]]] | keyword[def] identifier[serialize] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[word] ,
literal[string] : identifier[self] . identifier[pos] ,
literal[string] : identifier[self] . identifier[label] ,
literal[string] : identifier[self] . identifier[dependency] ,
literal[string] : identifier[self] . identifier[has_cjk] (),
} | def serialize(self):
"""Returns serialized chunk data in dictionary."""
return {'word': self.word, 'pos': self.pos, 'label': self.label, 'dependency': self.dependency, 'has_cjk': self.has_cjk()} |
def encrypt(self, key, iv="", cek="", **kwargs):
        """
        Produces a JWE as defined in RFC7516 using symmetric keys

        :param key: Shared symmetric key; may be a ``SYMKey``, raw ``bytes``
            or an integer array (converted via ``intarr2str``).
        :param iv: Initialization vector; generated when empty.
        :param cek: Content encryption (master) key; generated when empty.
        :param kwargs: Extra keyword arguments; only ``kid`` is honoured
            (copied into the header), the rest are ignored.
        :return: The packed JWE as produced by ``JWEnc.pack``.
        """
        _msg = as_bytes(self.msg)
        _args = self._dict
        # Propagate a caller-supplied key id into the protected header.
        try:
            _args["kid"] = kwargs["kid"]
        except KeyError:
            pass
        jwe = JWEnc(**_args)
        # If no iv and cek are given generate them
        iv = self._generate_iv(self["enc"], iv)
        cek = self._generate_key(self["enc"], cek)
        # Normalise the key-encryption key (KEK) to bytes.
        if isinstance(key, SYMKey):
            try:
                kek = key.key.encode('utf8')
            # key.key may already be bytes (no .encode); use it as-is.
            except AttributeError:
                kek = key.key
        elif isinstance(key, bytes):
            kek = key
        else:
            kek = intarr2str(key)
        # Wrap the CEK under the KEK using AES key wrap.
        # The iv for this function must be 64 bit
        # Which is certainly different from the one above
        jek = aes_key_wrap(kek, cek, default_backend())
        _enc = self["enc"]
        # The base64-encoded header doubles as the additional
        # authenticated data for the content encryption.
        _auth_data = jwe.b64_encode_header()
        ctxt, tag, cek = self.enc_setup(_enc, _msg, auth_data=_auth_data,
                                        key=cek, iv=iv)
        return jwe.pack(parts=[jek, iv, ctxt, tag])
constant[
Produces a JWE as defined in RFC7516 using symmetric keys
:param key: Shared symmetric key
:param iv: Initialization vector
:param cek: Content master key
:param kwargs: Extra keyword arguments, just ignore for now.
:return:
]
variable[_msg] assign[=] call[name[as_bytes], parameter[name[self].msg]]
variable[_args] assign[=] name[self]._dict
<ast.Try object at 0x7da1b05599f0>
variable[jwe] assign[=] call[name[JWEnc], parameter[]]
variable[iv] assign[=] call[name[self]._generate_iv, parameter[call[name[self]][constant[enc]], name[iv]]]
variable[cek] assign[=] call[name[self]._generate_key, parameter[call[name[self]][constant[enc]], name[cek]]]
if call[name[isinstance], parameter[name[key], name[SYMKey]]] begin[:]
<ast.Try object at 0x7da1b02e5060>
variable[jek] assign[=] call[name[aes_key_wrap], parameter[name[kek], name[cek], call[name[default_backend], parameter[]]]]
variable[_enc] assign[=] call[name[self]][constant[enc]]
variable[_auth_data] assign[=] call[name[jwe].b64_encode_header, parameter[]]
<ast.Tuple object at 0x7da1b0558e80> assign[=] call[name[self].enc_setup, parameter[name[_enc], name[_msg]]]
return[call[name[jwe].pack, parameter[]]] | keyword[def] identifier[encrypt] ( identifier[self] , identifier[key] , identifier[iv] = literal[string] , identifier[cek] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[_msg] = identifier[as_bytes] ( identifier[self] . identifier[msg] )
identifier[_args] = identifier[self] . identifier[_dict]
keyword[try] :
identifier[_args] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[jwe] = identifier[JWEnc] (** identifier[_args] )
identifier[iv] = identifier[self] . identifier[_generate_iv] ( identifier[self] [ literal[string] ], identifier[iv] )
identifier[cek] = identifier[self] . identifier[_generate_key] ( identifier[self] [ literal[string] ], identifier[cek] )
keyword[if] identifier[isinstance] ( identifier[key] , identifier[SYMKey] ):
keyword[try] :
identifier[kek] = identifier[key] . identifier[key] . identifier[encode] ( literal[string] )
keyword[except] identifier[AttributeError] :
identifier[kek] = identifier[key] . identifier[key]
keyword[elif] identifier[isinstance] ( identifier[key] , identifier[bytes] ):
identifier[kek] = identifier[key]
keyword[else] :
identifier[kek] = identifier[intarr2str] ( identifier[key] )
identifier[jek] = identifier[aes_key_wrap] ( identifier[kek] , identifier[cek] , identifier[default_backend] ())
identifier[_enc] = identifier[self] [ literal[string] ]
identifier[_auth_data] = identifier[jwe] . identifier[b64_encode_header] ()
identifier[ctxt] , identifier[tag] , identifier[cek] = identifier[self] . identifier[enc_setup] ( identifier[_enc] , identifier[_msg] , identifier[auth_data] = identifier[_auth_data] ,
identifier[key] = identifier[cek] , identifier[iv] = identifier[iv] )
keyword[return] identifier[jwe] . identifier[pack] ( identifier[parts] =[ identifier[jek] , identifier[iv] , identifier[ctxt] , identifier[tag] ]) | def encrypt(self, key, iv='', cek='', **kwargs):
"""
Produces a JWE as defined in RFC7516 using symmetric keys
:param key: Shared symmetric key
:param iv: Initialization vector
:param cek: Content master key
:param kwargs: Extra keyword arguments, just ignore for now.
:return:
"""
_msg = as_bytes(self.msg)
_args = self._dict
try:
_args['kid'] = kwargs['kid'] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
jwe = JWEnc(**_args)
# If no iv and cek are given generate them
iv = self._generate_iv(self['enc'], iv)
cek = self._generate_key(self['enc'], cek)
if isinstance(key, SYMKey):
try:
kek = key.key.encode('utf8') # depends on [control=['try'], data=[]]
except AttributeError:
kek = key.key # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(key, bytes):
kek = key # depends on [control=['if'], data=[]]
else:
kek = intarr2str(key)
# The iv for this function must be 64 bit
# Which is certainly different from the one above
jek = aes_key_wrap(kek, cek, default_backend())
_enc = self['enc']
_auth_data = jwe.b64_encode_header()
(ctxt, tag, cek) = self.enc_setup(_enc, _msg, auth_data=_auth_data, key=cek, iv=iv)
return jwe.pack(parts=[jek, iv, ctxt, tag]) |
def search(self, buf):
        """Search the provided buffer for matching bytes.

        Scans *buf* for the first occurrence of this sequence's bytes.

        :param buf: Buffer to search for a match.
        :return: :class:`SequenceMatch` if matched, None if no match was found.
        """
        start = self._check_type(buf).find(self._bytes)
        if start < 0:
            return None
        end = start + len(self._bytes)
        return SequenceMatch(self, buf[start:end], start, end)
constant[Search the provided buffer for matching bytes.
Search the provided buffer for matching bytes. If the *match* is found,
returns a :class:`SequenceMatch` object, otherwise returns ``None``.
:param buf: Buffer to search for a match.
:return: :class:`SequenceMatch` if matched, None if no match was found.
]
variable[idx] assign[=] call[call[name[self]._check_type, parameter[name[buf]]].find, parameter[name[self]._bytes]]
if compare[name[idx] less[<] constant[0]] begin[:]
return[constant[None]] | keyword[def] identifier[search] ( identifier[self] , identifier[buf] ):
literal[string]
identifier[idx] = identifier[self] . identifier[_check_type] ( identifier[buf] ). identifier[find] ( identifier[self] . identifier[_bytes] )
keyword[if] identifier[idx] < literal[int] :
keyword[return] keyword[None]
keyword[else] :
identifier[start] = identifier[idx]
identifier[end] = identifier[idx] + identifier[len] ( identifier[self] . identifier[_bytes] )
keyword[return] identifier[SequenceMatch] ( identifier[self] , identifier[buf] [ identifier[start] : identifier[end] ], identifier[start] , identifier[end] ) | def search(self, buf):
"""Search the provided buffer for matching bytes.
Search the provided buffer for matching bytes. If the *match* is found,
returns a :class:`SequenceMatch` object, otherwise returns ``None``.
:param buf: Buffer to search for a match.
:return: :class:`SequenceMatch` if matched, None if no match was found.
"""
idx = self._check_type(buf).find(self._bytes)
if idx < 0:
return None # depends on [control=['if'], data=[]]
else:
start = idx
end = idx + len(self._bytes)
return SequenceMatch(self, buf[start:end], start, end) |
def rpc_get_oneline_docstring(self, filename, source, offset):
        """Get a oneline docstring for the symbol at the offset."""
        normalized_source = get_source(source)
        return self._call_backend("rpc_get_oneline_docstring", None,
                                  filename, normalized_source, offset)
constant[Get a oneline docstring for the symbol at the offset.
]
return[call[name[self]._call_backend, parameter[constant[rpc_get_oneline_docstring], constant[None], name[filename], call[name[get_source], parameter[name[source]]], name[offset]]]] | keyword[def] identifier[rpc_get_oneline_docstring] ( identifier[self] , identifier[filename] , identifier[source] , identifier[offset] ):
literal[string]
keyword[return] identifier[self] . identifier[_call_backend] ( literal[string] , keyword[None] , identifier[filename] ,
identifier[get_source] ( identifier[source] ), identifier[offset] ) | def rpc_get_oneline_docstring(self, filename, source, offset):
"""Get a oneline docstring for the symbol at the offset.
"""
return self._call_backend('rpc_get_oneline_docstring', None, filename, get_source(source), offset) |
def query(cls, resources, time_range, query, resource_type, sampler):
        """Query statistics for given resources.

        The time window covers the last ``time_range`` seconds, formatted
        as UTC timestamps.
        """
        if not isinstance(resources, (list, tuple)):
            resources = [resources]
        fmt = '%Y-%m-%d %H:%M:%S'
        now = time.time()
        request = {
            'start': datetime.utcfromtimestamp(now - time_range).strftime(fmt),
            'end': datetime.utcfromtimestamp(now).strftime(fmt),
            'query': query,
            'resource_id': resources,
            'resource_type': resource_type,
            'sampler': sampler,
        }
        return cls.call('hosting.metric.query', request)
constant[Query statistics for given resources.]
if <ast.UnaryOp object at 0x7da207f984c0> begin[:]
variable[resources] assign[=] list[[<ast.Name object at 0x7da207f9a530>]]
variable[now] assign[=] call[name[time].time, parameter[]]
variable[start_utc] assign[=] call[name[datetime].utcfromtimestamp, parameter[binary_operation[name[now] - name[time_range]]]]
variable[end_utc] assign[=] call[name[datetime].utcfromtimestamp, parameter[name[now]]]
variable[date_format] assign[=] constant[%Y-%m-%d %H:%M:%S]
variable[start] assign[=] call[name[start_utc].strftime, parameter[name[date_format]]]
variable[end] assign[=] call[name[end_utc].strftime, parameter[name[date_format]]]
variable[query] assign[=] dictionary[[<ast.Constant object at 0x7da207f9be50>, <ast.Constant object at 0x7da207f9a890>, <ast.Constant object at 0x7da207f9a680>, <ast.Constant object at 0x7da207f99690>, <ast.Constant object at 0x7da207f99a20>, <ast.Constant object at 0x7da207f9b820>], [<ast.Name object at 0x7da207f9a920>, <ast.Name object at 0x7da207f98220>, <ast.Name object at 0x7da207f9b6d0>, <ast.Name object at 0x7da207f9b9a0>, <ast.Name object at 0x7da207f98730>, <ast.Name object at 0x7da207f99e10>]]
return[call[name[cls].call, parameter[constant[hosting.metric.query], name[query]]]] | keyword[def] identifier[query] ( identifier[cls] , identifier[resources] , identifier[time_range] , identifier[query] , identifier[resource_type] , identifier[sampler] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[resources] ,( identifier[list] , identifier[tuple] )):
identifier[resources] =[ identifier[resources] ]
identifier[now] = identifier[time] . identifier[time] ()
identifier[start_utc] = identifier[datetime] . identifier[utcfromtimestamp] ( identifier[now] - identifier[time_range] )
identifier[end_utc] = identifier[datetime] . identifier[utcfromtimestamp] ( identifier[now] )
identifier[date_format] = literal[string]
identifier[start] = identifier[start_utc] . identifier[strftime] ( identifier[date_format] )
identifier[end] = identifier[end_utc] . identifier[strftime] ( identifier[date_format] )
identifier[query] ={ literal[string] : identifier[start] ,
literal[string] : identifier[end] ,
literal[string] : identifier[query] ,
literal[string] : identifier[resources] ,
literal[string] : identifier[resource_type] ,
literal[string] : identifier[sampler] }
keyword[return] identifier[cls] . identifier[call] ( literal[string] , identifier[query] ) | def query(cls, resources, time_range, query, resource_type, sampler):
"""Query statistics for given resources."""
if not isinstance(resources, (list, tuple)):
resources = [resources] # depends on [control=['if'], data=[]]
now = time.time()
start_utc = datetime.utcfromtimestamp(now - time_range)
end_utc = datetime.utcfromtimestamp(now)
date_format = '%Y-%m-%d %H:%M:%S'
start = start_utc.strftime(date_format)
end = end_utc.strftime(date_format)
query = {'start': start, 'end': end, 'query': query, 'resource_id': resources, 'resource_type': resource_type, 'sampler': sampler}
return cls.call('hosting.metric.query', query) |
def get_keyword_hierarchy(self, pattern="*"):
        """Returns all keywords that match a glob-style pattern

        Groups the matching keywords by the collection they belong to.
        The pattern matching is insensitive to case.  The result is a
        list of dictionaries (one per collection, ordered by collection
        name), each with ``name``, ``collection_id``, ``path`` and a
        ``keywords`` list of ``{"name": ..., "doc": ...}`` entries
        ordered by keyword name.
        """
        sql = """SELECT collection.collection_id, collection.name, collection.path,
               keyword.name, keyword.doc
        FROM collection_table as collection
        JOIN keyword_table as keyword
        WHERE collection.collection_id == keyword.collection_id
        AND keyword.name like ?
        ORDER by collection.name, collection.collection_id, keyword.name
        """
        cursor = self._execute(sql, (self._glob_to_sql(pattern),))
        libraries = []
        previous_id = None
        # Rows arrive ordered by collection, so a change in collection_id
        # marks the start of a new library entry.
        for c_id, c_name, c_path, k_name, k_doc in cursor.fetchall():
            if c_id != previous_id:
                previous_id = c_id
                libraries.append({"name": c_name,
                                  "collection_id": c_id,
                                  "keywords": [],
                                  "path": c_path})
            libraries[-1]["keywords"].append({"name": k_name, "doc": k_doc})
        return libraries
constant[Returns all keywords that match a glob-style pattern
The result is a list of dictionaries, sorted by collection
name.
The pattern matching is insensitive to case. The function
returns a list of (library_name, keyword_name,
keyword_synopsis tuples) sorted by keyword name
]
variable[sql] assign[=] constant[SELECT collection.collection_id, collection.name, collection.path,
keyword.name, keyword.doc
FROM collection_table as collection
JOIN keyword_table as keyword
WHERE collection.collection_id == keyword.collection_id
AND keyword.name like ?
ORDER by collection.name, collection.collection_id, keyword.name
]
variable[cursor] assign[=] call[name[self]._execute, parameter[name[sql], tuple[[<ast.Call object at 0x7da1b1544d90>]]]]
variable[libraries] assign[=] list[[]]
variable[current_library] assign[=] constant[None]
for taget[name[row]] in starred[call[name[cursor].fetchall, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b17f9570> assign[=] name[row]
if compare[name[c_id] not_equal[!=] name[current_library]] begin[:]
variable[current_library] assign[=] name[c_id]
call[name[libraries].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1519990>, <ast.Constant object at 0x7da1b151b6d0>, <ast.Constant object at 0x7da1b151b0a0>, <ast.Constant object at 0x7da1b151bac0>], [<ast.Name object at 0x7da1b151a290>, <ast.Name object at 0x7da1b1518610>, <ast.List object at 0x7da1b151b610>, <ast.Name object at 0x7da1b151ad70>]]]]
call[call[call[name[libraries]][<ast.UnaryOp object at 0x7da1b1519c00>]][constant[keywords]].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1519c60>, <ast.Constant object at 0x7da1b151a2c0>], [<ast.Name object at 0x7da1b151b070>, <ast.Name object at 0x7da1b1518130>]]]]
return[name[libraries]] | keyword[def] identifier[get_keyword_hierarchy] ( identifier[self] , identifier[pattern] = literal[string] ):
literal[string]
identifier[sql] = literal[string]
identifier[cursor] = identifier[self] . identifier[_execute] ( identifier[sql] ,( identifier[self] . identifier[_glob_to_sql] ( identifier[pattern] ),))
identifier[libraries] =[]
identifier[current_library] = keyword[None]
keyword[for] identifier[row] keyword[in] identifier[cursor] . identifier[fetchall] ():
( identifier[c_id] , identifier[c_name] , identifier[c_path] , identifier[k_name] , identifier[k_doc] )= identifier[row]
keyword[if] identifier[c_id] != identifier[current_library] :
identifier[current_library] = identifier[c_id]
identifier[libraries] . identifier[append] ({ literal[string] : identifier[c_name] , literal[string] : identifier[c_id] , literal[string] :[], literal[string] : identifier[c_path] })
identifier[libraries] [- literal[int] ][ literal[string] ]. identifier[append] ({ literal[string] : identifier[k_name] , literal[string] : identifier[k_doc] })
keyword[return] identifier[libraries] | def get_keyword_hierarchy(self, pattern='*'):
"""Returns all keywords that match a glob-style pattern
The result is a list of dictionaries, sorted by collection
name.
The pattern matching is insensitive to case. The function
returns a list of (library_name, keyword_name,
keyword_synopsis tuples) sorted by keyword name
"""
sql = 'SELECT collection.collection_id, collection.name, collection.path,\n keyword.name, keyword.doc\n FROM collection_table as collection\n JOIN keyword_table as keyword\n WHERE collection.collection_id == keyword.collection_id\n AND keyword.name like ?\n ORDER by collection.name, collection.collection_id, keyword.name\n '
cursor = self._execute(sql, (self._glob_to_sql(pattern),))
libraries = []
current_library = None
for row in cursor.fetchall():
(c_id, c_name, c_path, k_name, k_doc) = row
if c_id != current_library:
current_library = c_id
libraries.append({'name': c_name, 'collection_id': c_id, 'keywords': [], 'path': c_path}) # depends on [control=['if'], data=['c_id', 'current_library']]
libraries[-1]['keywords'].append({'name': k_name, 'doc': k_doc}) # depends on [control=['for'], data=['row']]
return libraries |
def get_cmd(self):
    """Return the full shell command to be executed at runtime.

    The command is derived from ``self.test_program`` (and, for some
    programs, ``self.file_path``); any ``self.custom_args`` are appended
    verbatim at the end.

    :returns: the command line as a single string
    :raises InvalidTestProgram: if ``self.test_program`` is not one of
        the supported test programs
    """
    # Programs whose command does not depend on the watched path.
    fixed_commands = {
        'symfony': 'symfony test-all',
        # as seen on http://jelix.org/articles/fr/manuel-1.1/tests_unitaires
        'jelix': 'php tests.php',
        'phpunit': 'phpunit',
        'sphinx': 'make html',
        'tox': 'tox',
    }
    if self.test_program in ('nose', 'nosetests'):
        cmd = "nosetests %s" % self.file_path
    elif self.test_program == 'django':
        # Prefer the project-local manage.py when it exists.
        executable = "%s/manage.py" % self.file_path
        if os.path.exists(executable):
            cmd = "python %s/manage.py test" % self.file_path
        else:
            cmd = "django-admin.py test"
    elif self.test_program == 'py':
        cmd = 'py.test %s' % self.file_path
    else:
        cmd = fixed_commands.get(self.test_program)
    if not cmd:
        # Bug fix: the previous message advertised only `nose`, `django`
        # and `py` even though several other programs are supported.
        raise InvalidTestProgram(
            "The test program %s is unknown. Valid options are: "
            "`nose`, `nosetests`, `django`, `py`, `symfony`, `jelix`, "
            "`phpunit`, `sphinx` and `tox`" % self.test_program)
    # adding custom args
    if self.custom_args:
        cmd = '%s %s' % (cmd, self.custom_args)
    return cmd
constant[Returns the full command to be executed at runtime]
variable[cmd] assign[=] constant[None]
if compare[name[self].test_program in tuple[[<ast.Constant object at 0x7da18f723ac0>, <ast.Constant object at 0x7da18f722a40>]]] begin[:]
variable[cmd] assign[=] binary_operation[constant[nosetests %s] <ast.Mod object at 0x7da2590d6920> name[self].file_path]
if <ast.UnaryOp object at 0x7da18fe901f0> begin[:]
<ast.Raise object at 0x7da18fe929e0>
if name[self].custom_args begin[:]
variable[cmd] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18fe90fd0>, <ast.Attribute object at 0x7da18fe93ee0>]]]
return[name[cmd]] | keyword[def] identifier[get_cmd] ( identifier[self] ):
literal[string]
identifier[cmd] = keyword[None]
keyword[if] identifier[self] . identifier[test_program] keyword[in] ( literal[string] , literal[string] ):
identifier[cmd] = literal[string] % identifier[self] . identifier[file_path]
keyword[elif] identifier[self] . identifier[test_program] == literal[string] :
identifier[executable] = literal[string] % identifier[self] . identifier[file_path]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[executable] ):
identifier[cmd] = literal[string] % identifier[self] . identifier[file_path]
keyword[else] :
identifier[cmd] = literal[string]
keyword[elif] identifier[self] . identifier[test_program] == literal[string] :
identifier[cmd] = literal[string] % identifier[self] . identifier[file_path]
keyword[elif] identifier[self] . identifier[test_program] == literal[string] :
identifier[cmd] = literal[string]
keyword[elif] identifier[self] . identifier[test_program] == literal[string] :
identifier[cmd] = literal[string]
keyword[elif] identifier[self] . identifier[test_program] == literal[string] :
identifier[cmd] = literal[string]
keyword[elif] identifier[self] . identifier[test_program] == literal[string] :
identifier[cmd] = literal[string]
keyword[elif] identifier[self] . identifier[test_program] == literal[string] :
identifier[cmd] = literal[string]
keyword[if] keyword[not] identifier[cmd] :
keyword[raise] identifier[InvalidTestProgram] ( literal[string] % identifier[self] . identifier[test_program] )
keyword[if] identifier[self] . identifier[custom_args] :
identifier[cmd] = literal[string] %( identifier[cmd] , identifier[self] . identifier[custom_args] )
keyword[return] identifier[cmd] | def get_cmd(self):
"""Returns the full command to be executed at runtime"""
cmd = None
if self.test_program in ('nose', 'nosetests'):
cmd = 'nosetests %s' % self.file_path # depends on [control=['if'], data=[]]
elif self.test_program == 'django':
executable = '%s/manage.py' % self.file_path
if os.path.exists(executable):
cmd = 'python %s/manage.py test' % self.file_path # depends on [control=['if'], data=[]]
else:
cmd = 'django-admin.py test' # depends on [control=['if'], data=[]]
elif self.test_program == 'py':
cmd = 'py.test %s' % self.file_path # depends on [control=['if'], data=[]]
elif self.test_program == 'symfony':
cmd = 'symfony test-all' # depends on [control=['if'], data=[]]
elif self.test_program == 'jelix':
# as seen on http://jelix.org/articles/fr/manuel-1.1/tests_unitaires
cmd = 'php tests.php' # depends on [control=['if'], data=[]]
elif self.test_program == 'phpunit':
cmd = 'phpunit' # depends on [control=['if'], data=[]]
elif self.test_program == 'sphinx':
cmd = 'make html' # depends on [control=['if'], data=[]]
elif self.test_program == 'tox':
cmd = 'tox' # depends on [control=['if'], data=[]]
if not cmd:
raise InvalidTestProgram('The test program %s is unknown. Valid options are: `nose`, `django` and `py`' % self.test_program) # depends on [control=['if'], data=[]]
# adding custom args
if self.custom_args:
cmd = '%s %s' % (cmd, self.custom_args) # depends on [control=['if'], data=[]]
return cmd |
def index(self, attr):
    """
    Indexes the :class:`.Paper`\s in this :class:`.Corpus` instance
    by the attribute ``attr``.

    New indices are added to :attr:`.indices`\.

    Parameters
    ----------
    attr : str
        The name of a :class:`.Paper` attribute.
    """
    # The paper identifiers (dict keys) are irrelevant here; each paper
    # is simply handed to the per-paper indexer in turn.
    for _paper_id, paper in self.indexed_papers.iteritems():
        self.index_paper_by_attr(paper, attr)
constant[
Indexes the :class:`.Paper`\s in this :class:`.Corpus` instance
by the attribute ``attr``.
New indices are added to :attr:`.indices`\.
Parameters
----------
attr : str
The name of a :class:`.Paper` attribute.
]
for taget[tuple[[<ast.Name object at 0x7da1b11f8f40>, <ast.Name object at 0x7da1b11f9120>]]] in starred[call[name[self].indexed_papers.iteritems, parameter[]]] begin[:]
call[name[self].index_paper_by_attr, parameter[name[paper], name[attr]]] | keyword[def] identifier[index] ( identifier[self] , identifier[attr] ):
literal[string]
keyword[for] identifier[i] , identifier[paper] keyword[in] identifier[self] . identifier[indexed_papers] . identifier[iteritems] ():
identifier[self] . identifier[index_paper_by_attr] ( identifier[paper] , identifier[attr] ) | def index(self, attr):
"""
Indexes the :class:`.Paper`\\s in this :class:`.Corpus` instance
by the attribute ``attr``.
New indices are added to :attr:`.indices`\\.
Parameters
----------
attr : str
The name of a :class:`.Paper` attribute.
"""
for (i, paper) in self.indexed_papers.iteritems():
self.index_paper_by_attr(paper, attr) # depends on [control=['for'], data=[]] |
def last(self) -> Signature:
    """Retrieve the last Signature ordered by mangling descendant."""
    # Sorting the keys puts the most-descendant mangled name at the end.
    ordered_keys = sorted(self._hsig)
    return self._hsig[ordered_keys[-1]]
constant[ Retrieve the last Signature ordered by mangling descendant ]
variable[k] assign[=] call[name[sorted], parameter[call[name[self]._hsig.keys, parameter[]]]]
return[call[name[self]._hsig][call[name[k]][<ast.UnaryOp object at 0x7da1b0146a70>]]] | keyword[def] identifier[last] ( identifier[self] )-> identifier[Signature] :
literal[string]
identifier[k] = identifier[sorted] ( identifier[self] . identifier[_hsig] . identifier[keys] ())
keyword[return] identifier[self] . identifier[_hsig] [ identifier[k] [- literal[int] ]] | def last(self) -> Signature:
""" Retrieve the last Signature ordered by mangling descendant """
k = sorted(self._hsig.keys())
return self._hsig[k[-1]] |
def load_itasser_folder(self, ident, itasser_folder, organize=False, outdir=None, organize_name=None,
                        set_as_representative=False, representative_chain='X', force_rerun=False):
    """Load the results folder from an I-TASSER run (local, not from the website) and copy relevant files over to
    the protein structures directory.

    Args:
        ident (str): I-TASSER ID
        itasser_folder (str): Path to results folder
        organize (bool): If select files from modeling should be copied to the Protein directory
        outdir (str): Path to directory where files will be copied and organized to
        organize_name (str): Basename of files to rename results to. If not provided, will use id attribute.
        set_as_representative: If this structure should be set as the representative structure
        representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
        force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures

    Returns:
        ITASSERProp: The object that is now contained in the structures attribute
    """
    # Validate the destination up front, before touching the structures
    # list. The second check catches the case where neither ``outdir``
    # nor ``self.structure_dir`` is set.
    if organize:
        if not outdir:
            outdir = self.structure_dir
        if not outdir:
            raise ValueError('Directory to copy results to must be specified')
    if self.structures.has_id(ident):
        if force_rerun:
            # Drop the stale entry; a fresh one is created just below.
            existing = self.structures.get_by_id(ident)
            self.structures.remove(existing)
        else:
            log.debug('{}: already present in list of structures'.format(ident))
            itasser = self.structures.get_by_id(ident)
    # Either the ID was never present, or it was just removed above
    # (force_rerun) -- in both cases build and register a new entry.
    if not self.structures.has_id(ident):
        itasser = ITASSERProp(ident, itasser_folder)
        self.structures.append(itasser)
    if set_as_representative:
        self._representative_structure_setter(structprop=itasser, keep_chain=representative_chain, force_rerun=force_rerun)
    if organize:
        if itasser.structure_file:
            # The name of the actual pdb file will be $GENEID_model1.pdb
            if not organize_name:
                new_itasser_name = self.id + '_model1'
            else:
                new_itasser_name = organize_name
            # Additional results will be stored in a subdirectory
            dest_itasser_extra_dir = op.join(outdir, '{}_itasser'.format(new_itasser_name))
            ssbio.utils.make_dir(dest_itasser_extra_dir)
            # Copy the model1.pdb and also create summary dataframes
            itasser.copy_results(copy_to_dir=outdir, rename_model_to=new_itasser_name, force_rerun=force_rerun)
    return self.structures.get_by_id(ident) | def function[load_itasser_folder, parameter[self, ident, itasser_folder, organize, outdir, organize_name, set_as_representative, representative_chain, force_rerun]]:
constant[Load the results folder from an I-TASSER run (local, not from the website) and copy relevant files over to
the protein structures directory.
Args:
ident (str): I-TASSER ID
itasser_folder (str): Path to results folder
organize (bool): If select files from modeling should be copied to the Protein directory
outdir (str): Path to directory where files will be copied and organized to
organize_name (str): Basename of files to rename results to. If not provided, will use id attribute.
set_as_representative: If this structure should be set as the representative structure
representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures
Returns:
ITASSERProp: The object that is now contained in the structures attribute
]
if name[organize] begin[:]
if <ast.UnaryOp object at 0x7da1b0e2cc70> begin[:]
variable[outdir] assign[=] name[self].structure_dir
if <ast.UnaryOp object at 0x7da1b0e2dd50> begin[:]
<ast.Raise object at 0x7da1b0e2df60>
if call[name[self].structures.has_id, parameter[name[ident]]] begin[:]
if name[force_rerun] begin[:]
variable[existing] assign[=] call[name[self].structures.get_by_id, parameter[name[ident]]]
call[name[self].structures.remove, parameter[name[existing]]]
if <ast.UnaryOp object at 0x7da1b0e82f50> begin[:]
variable[itasser] assign[=] call[name[ITASSERProp], parameter[name[ident], name[itasser_folder]]]
call[name[self].structures.append, parameter[name[itasser]]]
if name[set_as_representative] begin[:]
call[name[self]._representative_structure_setter, parameter[]]
if name[organize] begin[:]
if name[itasser].structure_file begin[:]
if <ast.UnaryOp object at 0x7da1b0e833d0> begin[:]
variable[new_itasser_name] assign[=] binary_operation[name[self].id + constant[_model1]]
variable[dest_itasser_extra_dir] assign[=] call[name[op].join, parameter[name[outdir], call[constant[{}_itasser].format, parameter[name[new_itasser_name]]]]]
call[name[ssbio].utils.make_dir, parameter[name[dest_itasser_extra_dir]]]
call[name[itasser].copy_results, parameter[]]
return[call[name[self].structures.get_by_id, parameter[name[ident]]]] | keyword[def] identifier[load_itasser_folder] ( identifier[self] , identifier[ident] , identifier[itasser_folder] , identifier[organize] = keyword[False] , identifier[outdir] = keyword[None] , identifier[organize_name] = keyword[None] ,
identifier[set_as_representative] = keyword[False] , identifier[representative_chain] = literal[string] , identifier[force_rerun] = keyword[False] ):
literal[string]
keyword[if] identifier[organize] :
keyword[if] keyword[not] identifier[outdir] :
identifier[outdir] = identifier[self] . identifier[structure_dir]
keyword[if] keyword[not] identifier[outdir] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[structures] . identifier[has_id] ( identifier[ident] ):
keyword[if] identifier[force_rerun] :
identifier[existing] = identifier[self] . identifier[structures] . identifier[get_by_id] ( identifier[ident] )
identifier[self] . identifier[structures] . identifier[remove] ( identifier[existing] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[ident] ))
identifier[itasser] = identifier[self] . identifier[structures] . identifier[get_by_id] ( identifier[ident] )
keyword[if] keyword[not] identifier[self] . identifier[structures] . identifier[has_id] ( identifier[ident] ):
identifier[itasser] = identifier[ITASSERProp] ( identifier[ident] , identifier[itasser_folder] )
identifier[self] . identifier[structures] . identifier[append] ( identifier[itasser] )
keyword[if] identifier[set_as_representative] :
identifier[self] . identifier[_representative_structure_setter] ( identifier[structprop] = identifier[itasser] , identifier[keep_chain] = identifier[representative_chain] , identifier[force_rerun] = identifier[force_rerun] )
keyword[if] identifier[organize] :
keyword[if] identifier[itasser] . identifier[structure_file] :
keyword[if] keyword[not] identifier[organize_name] :
identifier[new_itasser_name] = identifier[self] . identifier[id] + literal[string]
keyword[else] :
identifier[new_itasser_name] = identifier[organize_name]
identifier[dest_itasser_extra_dir] = identifier[op] . identifier[join] ( identifier[outdir] , literal[string] . identifier[format] ( identifier[new_itasser_name] ))
identifier[ssbio] . identifier[utils] . identifier[make_dir] ( identifier[dest_itasser_extra_dir] )
identifier[itasser] . identifier[copy_results] ( identifier[copy_to_dir] = identifier[outdir] , identifier[rename_model_to] = identifier[new_itasser_name] , identifier[force_rerun] = identifier[force_rerun] )
keyword[return] identifier[self] . identifier[structures] . identifier[get_by_id] ( identifier[ident] ) | def load_itasser_folder(self, ident, itasser_folder, organize=False, outdir=None, organize_name=None, set_as_representative=False, representative_chain='X', force_rerun=False):
"""Load the results folder from an I-TASSER run (local, not from the website) and copy relevant files over to
the protein structures directory.
Args:
ident (str): I-TASSER ID
itasser_folder (str): Path to results folder
organize (bool): If select files from modeling should be copied to the Protein directory
outdir (str): Path to directory where files will be copied and organized to
organize_name (str): Basename of files to rename results to. If not provided, will use id attribute.
set_as_representative: If this structure should be set as the representative structure
representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures
Returns:
ITASSERProp: The object that is now contained in the structures attribute
"""
if organize:
if not outdir:
outdir = self.structure_dir
if not outdir:
raise ValueError('Directory to copy results to must be specified') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.structures.has_id(ident):
if force_rerun:
existing = self.structures.get_by_id(ident)
self.structures.remove(existing) # depends on [control=['if'], data=[]]
else:
log.debug('{}: already present in list of structures'.format(ident))
itasser = self.structures.get_by_id(ident) # depends on [control=['if'], data=[]]
if not self.structures.has_id(ident):
itasser = ITASSERProp(ident, itasser_folder)
self.structures.append(itasser) # depends on [control=['if'], data=[]]
if set_as_representative:
self._representative_structure_setter(structprop=itasser, keep_chain=representative_chain, force_rerun=force_rerun) # depends on [control=['if'], data=[]]
if organize:
if itasser.structure_file:
# The name of the actual pdb file will be $GENEID_model1.pdb
if not organize_name:
new_itasser_name = self.id + '_model1' # depends on [control=['if'], data=[]]
else:
new_itasser_name = organize_name
# Additional results will be stored in a subdirectory
dest_itasser_extra_dir = op.join(outdir, '{}_itasser'.format(new_itasser_name))
ssbio.utils.make_dir(dest_itasser_extra_dir)
# Copy the model1.pdb and also create summary dataframes
itasser.copy_results(copy_to_dir=outdir, rename_model_to=new_itasser_name, force_rerun=force_rerun) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self.structures.get_by_id(ident) |
def update_throughput(self, read_units, write_units):
    """Change this table's ProvisionedThroughput on Amazon DynamoDB.

    :type read_units: int
    :param read_units: The new value for ReadCapacityUnits.

    :type write_units: int
    :param write_units: The new value for WriteCapacityUnits.
    """
    # All the work is delegated to the Layer2 API object that owns this
    # table; it performs the actual service call.
    self.layer2.update_throughput(self, read_units, write_units)
constant[
Update the ProvisionedThroughput for the Amazon DynamoDB Table.
:type read_units: int
:param read_units: The new value for ReadCapacityUnits.
:type write_units: int
:param write_units: The new value for WriteCapacityUnits.
]
call[name[self].layer2.update_throughput, parameter[name[self], name[read_units], name[write_units]]] | keyword[def] identifier[update_throughput] ( identifier[self] , identifier[read_units] , identifier[write_units] ):
literal[string]
identifier[self] . identifier[layer2] . identifier[update_throughput] ( identifier[self] , identifier[read_units] , identifier[write_units] ) | def update_throughput(self, read_units, write_units):
"""
Update the ProvisionedThroughput for the Amazon DynamoDB Table.
:type read_units: int
:param read_units: The new value for ReadCapacityUnits.
:type write_units: int
:param write_units: The new value for WriteCapacityUnits.
"""
self.layer2.update_throughput(self, read_units, write_units) |
def notify(self, num=1):
    """Wake up to *num* greenlets blocked on this condition.

    :param num: the number of waiters to wake (default 1)
    :type num: int
    :raises:
        `RuntimeError` if the underlying lock hasn't been
        :meth:`acquired <Lock.acquire>`
    """
    if not self._is_owned():
        raise RuntimeError("cannot wait on un-acquired lock")
    # Never wake more waiters than are actually queued.
    wake_count = min(num, len(self._waiters))
    for _ in xrange(wake_count):
        waiter = self._waiters.popleft()
        scheduler.state.awoken_from_events.add(waiter[0])
constant[wake one or more waiting greenlets
:param num: the number of waiters to wake (default 1)
:type num: int
:raises:
`RuntimeError` if the underlying lock hasn't been
:meth:`acquired <Lock.acquire>`
]
if <ast.UnaryOp object at 0x7da2046233d0> begin[:]
<ast.Raise object at 0x7da18bc70160>
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[min], parameter[name[num], call[name[len], parameter[name[self]._waiters]]]]]]] begin[:]
call[name[scheduler].state.awoken_from_events.add, parameter[call[call[name[self]._waiters.popleft, parameter[]]][constant[0]]]] | keyword[def] identifier[notify] ( identifier[self] , identifier[num] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_owned] ():
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[min] ( identifier[num] , identifier[len] ( identifier[self] . identifier[_waiters] ))):
identifier[scheduler] . identifier[state] . identifier[awoken_from_events] . identifier[add] ( identifier[self] . identifier[_waiters] . identifier[popleft] ()[ literal[int] ]) | def notify(self, num=1):
"""wake one or more waiting greenlets
:param num: the number of waiters to wake (default 1)
:type num: int
:raises:
`RuntimeError` if the underlying lock hasn't been
:meth:`acquired <Lock.acquire>`
"""
if not self._is_owned():
raise RuntimeError('cannot wait on un-acquired lock') # depends on [control=['if'], data=[]]
for i in xrange(min(num, len(self._waiters))):
scheduler.state.awoken_from_events.add(self._waiters.popleft()[0]) # depends on [control=['for'], data=[]] |
def _infer_x_for_line(y):
"""
Infers the x for a line if no x is provided.
"""
array_shape = shape(y)
if len(array_shape) == 0:
return []
if len(array_shape) == 1:
return arange(array_shape[0])
if len(array_shape) > 1:
return arange(array_shape[1]) | def function[_infer_x_for_line, parameter[y]]:
constant[
Infers the x for a line if no x is provided.
]
variable[array_shape] assign[=] call[name[shape], parameter[name[y]]]
if compare[call[name[len], parameter[name[array_shape]]] equal[==] constant[0]] begin[:]
return[list[[]]]
if compare[call[name[len], parameter[name[array_shape]]] equal[==] constant[1]] begin[:]
return[call[name[arange], parameter[call[name[array_shape]][constant[0]]]]]
if compare[call[name[len], parameter[name[array_shape]]] greater[>] constant[1]] begin[:]
return[call[name[arange], parameter[call[name[array_shape]][constant[1]]]]] | keyword[def] identifier[_infer_x_for_line] ( identifier[y] ):
literal[string]
identifier[array_shape] = identifier[shape] ( identifier[y] )
keyword[if] identifier[len] ( identifier[array_shape] )== literal[int] :
keyword[return] []
keyword[if] identifier[len] ( identifier[array_shape] )== literal[int] :
keyword[return] identifier[arange] ( identifier[array_shape] [ literal[int] ])
keyword[if] identifier[len] ( identifier[array_shape] )> literal[int] :
keyword[return] identifier[arange] ( identifier[array_shape] [ literal[int] ]) | def _infer_x_for_line(y):
"""
Infers the x for a line if no x is provided.
"""
array_shape = shape(y)
if len(array_shape) == 0:
return [] # depends on [control=['if'], data=[]]
if len(array_shape) == 1:
return arange(array_shape[0]) # depends on [control=['if'], data=[]]
if len(array_shape) > 1:
return arange(array_shape[1]) # depends on [control=['if'], data=[]] |
def get_usrgos_w_parents(self, hdrgos, usrgos_all=None):
    """Get usrgos w/parents in hdrgos, even if usrgos did not get grouped under hdrgos."""
    go2parents = self.gosubdag.rcntobj.go2parents
    candidates = self.usrgos if usrgos_all is None else usrgos_all
    # Keep every user GO term whose ancestor set overlaps the header GOs.
    return {go for go in candidates
            if go2parents.get(go).intersection(hdrgos)}
constant[Get usrgos w/parents in hdrgos, even if usrgos did not get grouped under hdrgos.]
variable[usrgos] assign[=] call[name[set], parameter[]]
variable[_go2parents] assign[=] name[self].gosubdag.rcntobj.go2parents
if compare[name[usrgos_all] is constant[None]] begin[:]
variable[usrgos_all] assign[=] name[self].usrgos
for taget[name[usrgo]] in starred[name[usrgos_all]] begin[:]
variable[all_usrgo_parents] assign[=] call[name[_go2parents].get, parameter[name[usrgo]]]
variable[sel_usrgo_parents] assign[=] call[name[all_usrgo_parents].intersection, parameter[name[hdrgos]]]
if name[sel_usrgo_parents] begin[:]
call[name[usrgos].add, parameter[name[usrgo]]]
return[name[usrgos]] | keyword[def] identifier[get_usrgos_w_parents] ( identifier[self] , identifier[hdrgos] , identifier[usrgos_all] = keyword[None] ):
literal[string]
identifier[usrgos] = identifier[set] ()
identifier[_go2parents] = identifier[self] . identifier[gosubdag] . identifier[rcntobj] . identifier[go2parents]
keyword[if] identifier[usrgos_all] keyword[is] keyword[None] :
identifier[usrgos_all] = identifier[self] . identifier[usrgos]
keyword[for] identifier[usrgo] keyword[in] identifier[usrgos_all] :
identifier[all_usrgo_parents] = identifier[_go2parents] . identifier[get] ( identifier[usrgo] )
identifier[sel_usrgo_parents] = identifier[all_usrgo_parents] . identifier[intersection] ( identifier[hdrgos] )
keyword[if] identifier[sel_usrgo_parents] :
identifier[usrgos] . identifier[add] ( identifier[usrgo] )
keyword[return] identifier[usrgos] | def get_usrgos_w_parents(self, hdrgos, usrgos_all=None):
"""Get usrgos w/parents in hdrgos, even if usrgos did not get grouped under hdrgos."""
usrgos = set()
_go2parents = self.gosubdag.rcntobj.go2parents
if usrgos_all is None:
usrgos_all = self.usrgos # depends on [control=['if'], data=['usrgos_all']]
for usrgo in usrgos_all:
all_usrgo_parents = _go2parents.get(usrgo)
sel_usrgo_parents = all_usrgo_parents.intersection(hdrgos)
if sel_usrgo_parents:
usrgos.add(usrgo) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['usrgo']]
return usrgos |
async def _create_rev_reg(self, rr_id: str, rr_size: int = None) -> None:
    """
    Create revocation registry and new tails file (and association to
    corresponding revocation registry definition via symbolic link) for input
    revocation registry identifier.

    :param rr_id: revocation registry identifier
    :param rr_size: revocation registry size (defaults to 256)
    """
    LOGGER.debug('Issuer._create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
    rr_size = rr_size or 256
    (cd_id, tag) = rev_reg_id2cred_def_id__tag(rr_id)
    LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id)
    tails_writer_handle = await blob_storage.open_writer(
        'default',
        json.dumps({
            'base_dir': Tails.dir(self._dir_tails, rr_id),
            'uri_pattern': ''
        }))
    # Snapshot the set of unlinked tails files so the single file created
    # by issuer_create_and_store_revoc_reg below can be found by set
    # difference afterwards.
    apriori = Tails.unlinked(self._dir_tails)
    (rr_id, rrd_json, rre_json) = await anoncreds.issuer_create_and_store_revoc_reg(
        self.wallet.handle,
        self.did,
        'CL_ACCUM',
        tag,
        cd_id,
        json.dumps({
            'max_cred_num': rr_size,
            'issuance_type': 'ISSUANCE_ON_DEMAND'
        }),
        tails_writer_handle)
    delta = Tails.unlinked(self._dir_tails) - apriori
    # Exactly one new tails file must have appeared; anything else means
    # tails-file creation failed.
    if len(delta) != 1:
        LOGGER.debug(
            'Issuer._create_rev_reg: <!< Could not create tails file for rev reg id: %s', rr_id)
        raise CorruptTails('Could not create tails file for rev reg id {}'.format(rr_id))
    # Associate the new tails file (named by its hash) with this rev reg id.
    tails_hash = basename(delta.pop())
    Tails.associate(self._dir_tails, rr_id, tails_hash)
    # NOTE(review): the rev reg definition is built, submitted, and cached
    # while holding REVO_CACHE.lock -- presumably so the cache cannot
    # observe a definition before it is on the ledger; confirm.
    with REVO_CACHE.lock:
        rrd_req_json = await ledger.build_revoc_reg_def_request(self.did, rrd_json)
        await self._sign_submit(rrd_req_json)
        await self._get_rev_reg_def(rr_id)  # add to cache en passant
    # Publish the initial accumulator entry for the new registry.
    rre_req_json = await ledger.build_revoc_reg_entry_request(self.did, rr_id, 'CL_ACCUM', rre_json)
    await self._sign_submit(rre_req_json)
    LOGGER.debug('Issuer._create_rev_reg <<<') | <ast.AsyncFunctionDef object at 0x7da18f7239d0>
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rr_id] , identifier[rr_size] )
identifier[rr_size] = identifier[rr_size] keyword[or] literal[int]
( identifier[cd_id] , identifier[tag] )= identifier[rev_reg_id2cred_def_id__tag] ( identifier[rr_id] )
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[rr_size] , identifier[rr_id] )
identifier[tails_writer_handle] = keyword[await] identifier[blob_storage] . identifier[open_writer] (
literal[string] ,
identifier[json] . identifier[dumps] ({
literal[string] : identifier[Tails] . identifier[dir] ( identifier[self] . identifier[_dir_tails] , identifier[rr_id] ),
literal[string] : literal[string]
}))
identifier[apriori] = identifier[Tails] . identifier[unlinked] ( identifier[self] . identifier[_dir_tails] )
( identifier[rr_id] , identifier[rrd_json] , identifier[rre_json] )= keyword[await] identifier[anoncreds] . identifier[issuer_create_and_store_revoc_reg] (
identifier[self] . identifier[wallet] . identifier[handle] ,
identifier[self] . identifier[did] ,
literal[string] ,
identifier[tag] ,
identifier[cd_id] ,
identifier[json] . identifier[dumps] ({
literal[string] : identifier[rr_size] ,
literal[string] : literal[string]
}),
identifier[tails_writer_handle] )
identifier[delta] = identifier[Tails] . identifier[unlinked] ( identifier[self] . identifier[_dir_tails] )- identifier[apriori]
keyword[if] identifier[len] ( identifier[delta] )!= literal[int] :
identifier[LOGGER] . identifier[debug] (
literal[string] , identifier[rr_id] )
keyword[raise] identifier[CorruptTails] ( literal[string] . identifier[format] ( identifier[rr_id] ))
identifier[tails_hash] = identifier[basename] ( identifier[delta] . identifier[pop] ())
identifier[Tails] . identifier[associate] ( identifier[self] . identifier[_dir_tails] , identifier[rr_id] , identifier[tails_hash] )
keyword[with] identifier[REVO_CACHE] . identifier[lock] :
identifier[rrd_req_json] = keyword[await] identifier[ledger] . identifier[build_revoc_reg_def_request] ( identifier[self] . identifier[did] , identifier[rrd_json] )
keyword[await] identifier[self] . identifier[_sign_submit] ( identifier[rrd_req_json] )
keyword[await] identifier[self] . identifier[_get_rev_reg_def] ( identifier[rr_id] )
identifier[rre_req_json] = keyword[await] identifier[ledger] . identifier[build_revoc_reg_entry_request] ( identifier[self] . identifier[did] , identifier[rr_id] , literal[string] , identifier[rre_json] )
keyword[await] identifier[self] . identifier[_sign_submit] ( identifier[rre_req_json] )
identifier[LOGGER] . identifier[debug] ( literal[string] ) | async def _create_rev_reg(self, rr_id: str, rr_size: int=None) -> None:
"""
Create revocation registry and new tails file (and association to
corresponding revocation registry definition via symbolic link) for input
revocation registry identifier.
:param rr_id: revocation registry identifier
:param rr_size: revocation registry size (defaults to 256)
"""
LOGGER.debug('Issuer._create_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
rr_size = rr_size or 256
(cd_id, tag) = rev_reg_id2cred_def_id__tag(rr_id)
LOGGER.info('Creating revocation registry (capacity %s) for rev reg id %s', rr_size, rr_id)
tails_writer_handle = await blob_storage.open_writer('default', json.dumps({'base_dir': Tails.dir(self._dir_tails, rr_id), 'uri_pattern': ''}))
apriori = Tails.unlinked(self._dir_tails)
(rr_id, rrd_json, rre_json) = await anoncreds.issuer_create_and_store_revoc_reg(self.wallet.handle, self.did, 'CL_ACCUM', tag, cd_id, json.dumps({'max_cred_num': rr_size, 'issuance_type': 'ISSUANCE_ON_DEMAND'}), tails_writer_handle)
delta = Tails.unlinked(self._dir_tails) - apriori
if len(delta) != 1:
LOGGER.debug('Issuer._create_rev_reg: <!< Could not create tails file for rev reg id: %s', rr_id)
raise CorruptTails('Could not create tails file for rev reg id {}'.format(rr_id)) # depends on [control=['if'], data=[]]
tails_hash = basename(delta.pop())
Tails.associate(self._dir_tails, rr_id, tails_hash)
with REVO_CACHE.lock:
rrd_req_json = await ledger.build_revoc_reg_def_request(self.did, rrd_json)
await self._sign_submit(rrd_req_json)
await self._get_rev_reg_def(rr_id) # add to cache en passant # depends on [control=['with'], data=[]]
rre_req_json = await ledger.build_revoc_reg_entry_request(self.did, rr_id, 'CL_ACCUM', rre_json)
await self._sign_submit(rre_req_json)
LOGGER.debug('Issuer._create_rev_reg <<<') |
def _serialize(
        self,
        element,  # type: ET.Element
        value,    # type: Any
        state     # type: _ProcessorState
):
    # type: (...) -> None
    """Serialize *value* onto *element*.
    Runs the before-serialize hooks first, substitutes the configured
    default when the hook result is ``None``, and writes the resulting
    text either into the configured attribute or into the element body.
    """
    hooked = _hooks_apply_before_serialize(self._hooks, state, value)
    # Only ``None`` counts as missing: falsey values ('', 0, False) are
    # serialized verbatim instead of being replaced by the default.
    if hooked is not None:
        content = Text(hooked)
    elif self._default is not None:
        content = Text(self._default)
    else:
        content = Text('')
    if self._attribute:
        element.set(self._attribute, content)
    else:
        element.text = content
constant[Serialize the value to the element.]
variable[xml_value] assign[=] call[name[_hooks_apply_before_serialize], parameter[name[self]._hooks, name[state], name[value]]]
if compare[name[xml_value] is constant[None]] begin[:]
if compare[name[self]._default is constant[None]] begin[:]
variable[serialized_value] assign[=] call[name[Text], parameter[constant[]]]
if name[self]._attribute begin[:]
call[name[element].set, parameter[name[self]._attribute, name[serialized_value]]] | keyword[def] identifier[_serialize] (
identifier[self] ,
identifier[element] ,
identifier[value] ,
identifier[state]
):
literal[string]
identifier[xml_value] = identifier[_hooks_apply_before_serialize] ( identifier[self] . identifier[_hooks] , identifier[state] , identifier[value] )
keyword[if] identifier[xml_value] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[_default] keyword[is] keyword[None] :
identifier[serialized_value] = identifier[Text] ( literal[string] )
keyword[else] :
identifier[serialized_value] = identifier[Text] ( identifier[self] . identifier[_default] )
keyword[else] :
identifier[serialized_value] = identifier[Text] ( identifier[xml_value] )
keyword[if] identifier[self] . identifier[_attribute] :
identifier[element] . identifier[set] ( identifier[self] . identifier[_attribute] , identifier[serialized_value] )
keyword[else] :
identifier[element] . identifier[text] = identifier[serialized_value] | def _serialize(self, element, value, state): # type: ET.Element
# type: Any
# type: _ProcessorState
# type: (...) -> None
'Serialize the value to the element.'
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
# A value is only considered missing, and hence eligible to be replaced by its
# default only if it is None. Falsey values are not considered missing and are
# not replaced by the default.
if xml_value is None:
if self._default is None:
serialized_value = Text('') # depends on [control=['if'], data=[]]
else:
serialized_value = Text(self._default) # depends on [control=['if'], data=[]]
else:
serialized_value = Text(xml_value)
if self._attribute:
element.set(self._attribute, serialized_value) # depends on [control=['if'], data=[]]
else:
element.text = serialized_value |
def _group(self, element):
    """Parses the XML element as a group of [unknown] number of lines."""
    for version in _get_xml_version(element):
        # A group is only meaningful when it carries a name attribute;
        # warn once per applicable version otherwise.
        if "name" not in element.attrib:
            msg.warn("no name element in {}. Ignored. (_group)".format(element))
            continue
        target = self.versions[version]
        group = TemplateGroup(element, target.comment)
        target.entries[group.identifier] = group
        target.order.append(group.identifier)
constant[Parses the XML element as a group of [unknown] number of lines.]
for taget[name[v]] in starred[call[name[_get_xml_version], parameter[name[element]]]] begin[:]
if compare[constant[name] in name[element].attrib] begin[:]
variable[g] assign[=] call[name[TemplateGroup], parameter[name[element], call[name[self].versions][name[v]].comment]]
call[call[name[self].versions][name[v]].entries][name[g].identifier] assign[=] name[g]
call[call[name[self].versions][name[v]].order.append, parameter[name[g].identifier]] | keyword[def] identifier[_group] ( identifier[self] , identifier[element] ):
literal[string]
keyword[for] identifier[v] keyword[in] identifier[_get_xml_version] ( identifier[element] ):
keyword[if] literal[string] keyword[in] identifier[element] . identifier[attrib] :
identifier[g] = identifier[TemplateGroup] ( identifier[element] , identifier[self] . identifier[versions] [ identifier[v] ]. identifier[comment] )
identifier[self] . identifier[versions] [ identifier[v] ]. identifier[entries] [ identifier[g] . identifier[identifier] ]= identifier[g]
identifier[self] . identifier[versions] [ identifier[v] ]. identifier[order] . identifier[append] ( identifier[g] . identifier[identifier] )
keyword[else] :
identifier[msg] . identifier[warn] ( literal[string] . identifier[format] ( identifier[element] )) | def _group(self, element):
"""Parses the XML element as a group of [unknown] number of lines."""
for v in _get_xml_version(element):
if 'name' in element.attrib:
g = TemplateGroup(element, self.versions[v].comment)
self.versions[v].entries[g.identifier] = g
self.versions[v].order.append(g.identifier) # depends on [control=['if'], data=[]]
else:
msg.warn('no name element in {}. Ignored. (_group)'.format(element)) # depends on [control=['for'], data=['v']] |
def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
    """
    Return a tarball of this image as a stream of raw archive data,
    mirroring the ``docker save`` command.
    Args:
        chunk_size (int): Maximum amount of data returned per iteration
            (less may be returned). ``None`` streams data as it is
            received. Default: 2 MB.
        named (str or bool): ``False`` (default) strips repository and
            tag information from the tarball. ``True`` identifies the
            image by the first entry of the :py:attr:`~tags` list; any
            member of :py:attr:`~tags` may also be passed to save under
            that specific tag.
    Returns:
        (generator): A stream of raw archive data.
    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    Example:
        >>> image = cli.get_image("busybox:latest")
        >>> f = open('/tmp/busybox-latest.tar', 'wb')
        >>> for chunk in image:
        >>>   f.write(chunk)
        >>> f.close()
    """
    target = self.id
    if named:
        if self.tags:
            target = self.tags[0]
        if isinstance(named, six.string_types):
            if named not in self.tags:
                raise InvalidArgument(
                    "{} is not a valid tag for this image".format(named)
                )
            target = named
    return self.client.api.get_image(target, chunk_size)
constant[
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
chunk_size (int): The generator will return up to that much data
per iteration, but may return less. If ``None``, data will be
streamed as it is received. Default: 2 MB
named (str or bool): If ``False`` (default), the tarball will not
retain repository and tag information for this image. If set
to ``True``, the first tag in the :py:attr:`~tags` list will
be used to identify the image. Alternatively, any element of
the :py:attr:`~tags` list can be used as an argument to use
that specific tag as the saved identifier.
Returns:
(generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
>>> f.close()
]
variable[img] assign[=] name[self].id
if name[named] begin[:]
variable[img] assign[=] <ast.IfExp object at 0x7da18f00ea40>
if call[name[isinstance], parameter[name[named], name[six].string_types]] begin[:]
if compare[name[named] <ast.NotIn object at 0x7da2590d7190> name[self].tags] begin[:]
<ast.Raise object at 0x7da18f00c310>
variable[img] assign[=] name[named]
return[call[name[self].client.api.get_image, parameter[name[img], name[chunk_size]]]] | keyword[def] identifier[save] ( identifier[self] , identifier[chunk_size] = identifier[DEFAULT_DATA_CHUNK_SIZE] , identifier[named] = keyword[False] ):
literal[string]
identifier[img] = identifier[self] . identifier[id]
keyword[if] identifier[named] :
identifier[img] = identifier[self] . identifier[tags] [ literal[int] ] keyword[if] identifier[self] . identifier[tags] keyword[else] identifier[img]
keyword[if] identifier[isinstance] ( identifier[named] , identifier[six] . identifier[string_types] ):
keyword[if] identifier[named] keyword[not] keyword[in] identifier[self] . identifier[tags] :
keyword[raise] identifier[InvalidArgument] (
literal[string] . identifier[format] ( identifier[named] )
)
identifier[img] = identifier[named]
keyword[return] identifier[self] . identifier[client] . identifier[api] . identifier[get_image] ( identifier[img] , identifier[chunk_size] ) | def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
chunk_size (int): The generator will return up to that much data
per iteration, but may return less. If ``None``, data will be
streamed as it is received. Default: 2 MB
named (str or bool): If ``False`` (default), the tarball will not
retain repository and tag information for this image. If set
to ``True``, the first tag in the :py:attr:`~tags` list will
be used to identify the image. Alternatively, any element of
the :py:attr:`~tags` list can be used as an argument to use
that specific tag as the saved identifier.
Returns:
(generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
>>> f.close()
"""
img = self.id
if named:
img = self.tags[0] if self.tags else img
if isinstance(named, six.string_types):
if named not in self.tags:
raise InvalidArgument('{} is not a valid tag for this image'.format(named)) # depends on [control=['if'], data=['named']]
img = named # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self.client.api.get_image(img, chunk_size) |
def append_vector(self, name, vector, _left=False):
    """Add a data vectors column.
    Chooses a column width from the widest value in *vector* (but at
    least ``min(7, len(name))`` for numeric dtypes, exactly 7 otherwise)
    and registers a per-datum formatter with ``self.append``.
    :param name: Attribute name read from each datum, also used as the
        column header.
    :param vector: numpy array used only to size/format the column.
    :param _left: Forwarded to ``self.append`` to left-align the column.
    """
    def _fit_header(width):
        # Truncate an over-long name with a trailing '.', otherwise
        # right-justify it into the column width.
        if len(name) > width:
            return name[:width - 1] + '.'
        return name.rjust(width)

    if np.issubdtype(vector.dtype, np.integer):
        # Width of the largest-magnitude value decides the column width.
        largest = str(max(vector.max(), vector.min(), key=abs))
        length = max(len(largest), min(7, len(name)))
        def f(datum):
            return str(getattr(datum, name)).rjust(length)
    elif np.issubdtype(vector.dtype, np.floating):
        largest = np.format_float_positional(
            max(vector.max(), vector.min(), key=abs),
            precision=6, trim='0')
        length = max(len(largest), min(7, len(name)))
        def f(datum):
            return np.format_float_positional(
                getattr(datum, name),
                precision=6, trim='0',
            ).rjust(length)
    else:
        # Non-numeric values: fixed width, repr() truncated with '...'.
        length = 7
        def f(datum):
            r = repr(getattr(datum, name))
            if len(r) > length:
                r = r[:length - 3] + '...'
            return r.rjust(length)
    self.append(_fit_header(length), f, _left=_left)
constant[Add a data vectors column.]
if call[name[np].issubdtype, parameter[name[vector].dtype, name[np].integer]] begin[:]
variable[largest] assign[=] call[name[str], parameter[call[name[max], parameter[call[name[vector].max, parameter[]], call[name[vector].min, parameter[]]]]]]
variable[length] assign[=] call[name[max], parameter[call[name[len], parameter[name[largest]]], call[name[min], parameter[constant[7], call[name[len], parameter[name[name]]]]]]]
if compare[call[name[len], parameter[name[name]]] greater[>] name[length]] begin[:]
variable[header] assign[=] binary_operation[call[name[name]][<ast.Slice object at 0x7da1b07afe80>] + constant[.]]
def function[f, parameter[datum]]:
return[call[call[name[str], parameter[call[name[getattr], parameter[name[datum], name[name]]]]].rjust, parameter[name[length]]]]
call[name[self].append, parameter[name[header], name[f]]] | keyword[def] identifier[append_vector] ( identifier[self] , identifier[name] , identifier[vector] , identifier[_left] = keyword[False] ):
literal[string]
keyword[if] identifier[np] . identifier[issubdtype] ( identifier[vector] . identifier[dtype] , identifier[np] . identifier[integer] ):
identifier[largest] = identifier[str] ( identifier[max] ( identifier[vector] . identifier[max] (), identifier[vector] . identifier[min] (), identifier[key] = identifier[abs] ))
identifier[length] = identifier[max] ( identifier[len] ( identifier[largest] ), identifier[min] ( literal[int] , identifier[len] ( identifier[name] )))
keyword[if] identifier[len] ( identifier[name] )> identifier[length] :
identifier[header] = identifier[name] [: identifier[length] - literal[int] ]+ literal[string]
keyword[else] :
identifier[header] = identifier[name] . identifier[rjust] ( identifier[length] )
keyword[def] identifier[f] ( identifier[datum] ):
keyword[return] identifier[str] ( identifier[getattr] ( identifier[datum] , identifier[name] )). identifier[rjust] ( identifier[length] )
keyword[elif] identifier[np] . identifier[issubdtype] ( identifier[vector] . identifier[dtype] , identifier[np] . identifier[floating] ):
identifier[largest] = identifier[np] . identifier[format_float_positional] ( identifier[max] ( identifier[vector] . identifier[max] (), identifier[vector] . identifier[min] (), identifier[key] = identifier[abs] ),
identifier[precision] = literal[int] , identifier[trim] = literal[string] )
identifier[length] = identifier[max] ( identifier[len] ( identifier[largest] ), identifier[min] ( literal[int] , identifier[len] ( identifier[name] )))
keyword[if] identifier[len] ( identifier[name] )> identifier[length] :
identifier[header] = identifier[name] [: identifier[length] - literal[int] ]+ literal[string]
keyword[else] :
identifier[header] = identifier[name] . identifier[rjust] ( identifier[length] )
keyword[def] identifier[f] ( identifier[datum] ):
keyword[return] identifier[np] . identifier[format_float_positional] ( identifier[getattr] ( identifier[datum] , identifier[name] ),
identifier[precision] = literal[int] , identifier[trim] = literal[string] ,
). identifier[rjust] ( identifier[length] )
keyword[else] :
identifier[length] = literal[int]
keyword[if] identifier[len] ( identifier[name] )> identifier[length] :
identifier[header] = identifier[name] [: identifier[length] - literal[int] ]+ literal[string]
keyword[else] :
identifier[header] = identifier[name] . identifier[rjust] ( identifier[length] )
keyword[def] identifier[f] ( identifier[datum] ):
identifier[r] = identifier[repr] ( identifier[getattr] ( identifier[datum] , identifier[name] ))
keyword[if] identifier[len] ( identifier[r] )> identifier[length] :
identifier[r] = identifier[r] [: identifier[length] - literal[int] ]+ literal[string]
keyword[return] identifier[r] . identifier[rjust] ( identifier[length] )
identifier[self] . identifier[append] ( identifier[header] , identifier[f] , identifier[_left] = identifier[_left] ) | def append_vector(self, name, vector, _left=False):
"""Add a data vectors column."""
if np.issubdtype(vector.dtype, np.integer):
# determine the length we need
largest = str(max(vector.max(), vector.min(), key=abs))
length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
if len(name) > length:
header = name[:length - 1] + '.' # depends on [control=['if'], data=['length']]
else:
header = name.rjust(length)
def f(datum):
return str(getattr(datum, name)).rjust(length) # depends on [control=['if'], data=[]]
elif np.issubdtype(vector.dtype, np.floating):
largest = np.format_float_positional(max(vector.max(), vector.min(), key=abs), precision=6, trim='0')
length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
if len(name) > length:
header = name[:length - 1] + '.' # depends on [control=['if'], data=['length']]
else:
header = name.rjust(length)
def f(datum):
return np.format_float_positional(getattr(datum, name), precision=6, trim='0').rjust(length) # depends on [control=['if'], data=[]]
else:
length = 7
if len(name) > length:
header = name[:length - 1] + '.' # depends on [control=['if'], data=['length']]
else:
header = name.rjust(length)
def f(datum):
r = repr(getattr(datum, name))
if len(r) > length:
r = r[:length - 3] + '...' # depends on [control=['if'], data=['length']]
return r.rjust(length)
self.append(header, f, _left=_left) |
def run_command(cmd, debug=False):
    """
    Execute the given command and returns None.
    :param cmd: A ``sh.Command`` object to execute.
    :param debug: An optional bool to toggle debug output.
    :return: ``sh`` object
    """
    if debug:
        # WARN(retr0h): digs the environment out of ``sh``'s internal
        # ``_partial_call_args`` structure on the command object.
        env = cmd._partial_call_args.get('env', {})
        print_environment_vars(env)
        print_debug('COMMAND', str(cmd))
        print()
    return cmd(_truncate_exc=False)
constant[
Execute the given command and returns None.
:param cmd: A ``sh.Command`` object to execute.
:param debug: An optional bool to toggle debug output.
:return: ``sh`` object
]
if name[debug] begin[:]
call[name[print_environment_vars], parameter[call[name[cmd]._partial_call_args.get, parameter[constant[env], dictionary[[], []]]]]]
call[name[print_debug], parameter[constant[COMMAND], call[name[str], parameter[name[cmd]]]]]
call[name[print], parameter[]]
return[call[name[cmd], parameter[]]] | keyword[def] identifier[run_command] ( identifier[cmd] , identifier[debug] = keyword[False] ):
literal[string]
keyword[if] identifier[debug] :
identifier[print_environment_vars] ( identifier[cmd] . identifier[_partial_call_args] . identifier[get] ( literal[string] ,{}))
identifier[print_debug] ( literal[string] , identifier[str] ( identifier[cmd] ))
identifier[print] ()
keyword[return] identifier[cmd] ( identifier[_truncate_exc] = keyword[False] ) | def run_command(cmd, debug=False):
"""
Execute the given command and returns None.
:param cmd: A ``sh.Command`` object to execute.
:param debug: An optional bool to toggle debug output.
:return: ``sh`` object
"""
if debug:
# WARN(retr0h): Uses an internal ``sh`` data structure to dig
# the environment out of the ``sh.command`` object.
print_environment_vars(cmd._partial_call_args.get('env', {}))
print_debug('COMMAND', str(cmd))
print() # depends on [control=['if'], data=[]]
return cmd(_truncate_exc=False) |
def drop(self, session=None):
    """Alias for :meth:`~pymongo.database.Database.drop_collection`.
    ``db.foo.drop()`` and ``db.drop_collection("foo")`` are equivalent.
    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
    .. versionchanged:: 3.7
       :meth:`drop` now respects this :class:`Collection`'s
       :attr:`write_concern`.
    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    # Re-scope the database through the client so this collection's codec
    # options, read preference and read/write concerns are honoured.
    client = self.__database.client
    scoped_db = client.get_database(
        self.__database.name,
        self.codec_options,
        self.read_preference,
        self.write_concern,
        self.read_concern,
    )
    scoped_db.drop_collection(self.__name, session=session)
constant[Alias for :meth:`~pymongo.database.Database.drop_collection`.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
.. versionchanged:: 3.7
:meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`.
.. versionchanged:: 3.6
Added ``session`` parameter.
]
variable[dbo] assign[=] call[name[self].__database.client.get_database, parameter[name[self].__database.name, name[self].codec_options, name[self].read_preference, name[self].write_concern, name[self].read_concern]]
call[name[dbo].drop_collection, parameter[name[self].__name]] | keyword[def] identifier[drop] ( identifier[self] , identifier[session] = keyword[None] ):
literal[string]
identifier[dbo] = identifier[self] . identifier[__database] . identifier[client] . identifier[get_database] (
identifier[self] . identifier[__database] . identifier[name] ,
identifier[self] . identifier[codec_options] ,
identifier[self] . identifier[read_preference] ,
identifier[self] . identifier[write_concern] ,
identifier[self] . identifier[read_concern] )
identifier[dbo] . identifier[drop_collection] ( identifier[self] . identifier[__name] , identifier[session] = identifier[session] ) | def drop(self, session=None):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
.. versionchanged:: 3.7
:meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(self.__database.name, self.codec_options, self.read_preference, self.write_concern, self.read_concern)
dbo.drop_collection(self.__name, session=session) |
def _set_interface_refresh_reduction(self, v, load=False):
  """
  Setter method for interface_refresh_reduction, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_interface_refresh_reduction is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_interface_refresh_reduction() directly.
  :param v: value to assign to the container; validated by the
      YANGDynClass wrapper below.
  :param load: NOTE(review): accepted but not used in this body --
      presumably kept for signature compatibility with other generated
      setters; confirm against the code-generator template.
  """
  # If the value carries a user-type converter, run it so the raw value
  # is what gets validated below.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Wrap the value in the generated checked container type; the keyword
    # arguments carry the YANG schema metadata used for validation.
    t = YANGDynClass(v,base=interface_refresh_reduction.interface_refresh_reduction, is_container='container', presence=False, yang_name="interface-refresh-reduction", rest_name="refresh-reduction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RSVP Refresh reduction on this interface', u'alt-name': u'refresh-reduction'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Surface a structured, schema-style error payload instead of the raw
    # wrapper exception.
    raise ValueError({
      'error-string': """interface_refresh_reduction must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=interface_refresh_reduction.interface_refresh_reduction, is_container='container', presence=False, yang_name="interface-refresh-reduction", rest_name="refresh-reduction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RSVP Refresh reduction on this interface', u'alt-name': u'refresh-reduction'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
    })
  self.__interface_refresh_reduction = t
  # Some generated classes define a change hook; invoke it when present.
  if hasattr(self, '_set'):
    self._set()
constant[
Setter method for interface_refresh_reduction, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_refresh_reduction is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_refresh_reduction() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18eb546d0>
name[self].__interface_refresh_reduction assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_interface_refresh_reduction] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[interface_refresh_reduction] . identifier[interface_refresh_reduction] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__interface_refresh_reduction] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_interface_refresh_reduction(self, v, load=False):
"""
Setter method for interface_refresh_reduction, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_refresh_reduction is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_refresh_reduction() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=interface_refresh_reduction.interface_refresh_reduction, is_container='container', presence=False, yang_name='interface-refresh-reduction', rest_name='refresh-reduction', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RSVP Refresh reduction on this interface', u'alt-name': u'refresh-reduction'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'interface_refresh_reduction must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=interface_refresh_reduction.interface_refresh_reduction, is_container=\'container\', presence=False, yang_name="interface-refresh-reduction", rest_name="refresh-reduction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Configure RSVP Refresh reduction on this interface\', u\'alt-name\': u\'refresh-reduction\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__interface_refresh_reduction = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def _lazy_load(self):
"""
Fetch metadata if it was overlooked during the object's creation.
This can happen when you retrieve documents via search, because
the JSON response does not include complete meta data for all
results.
"""
obj = self._connection.documents.get(id=self.id)
self.__dict__['contributor'] = obj.contributor
self.__dict__['contributor_organization'] = \
obj.contributor_organization
self.__dict__['data'] = obj.data
self.__dict__['annotations'] = obj.__dict__['annotations']
self.__dict__['sections'] = obj.__dict__['sections'] | def function[_lazy_load, parameter[self]]:
constant[
Fetch metadata if it was overlooked during the object's creation.
This can happen when you retrieve documents via search, because
the JSON response does not include complete meta data for all
results.
]
variable[obj] assign[=] call[name[self]._connection.documents.get, parameter[]]
call[name[self].__dict__][constant[contributor]] assign[=] name[obj].contributor
call[name[self].__dict__][constant[contributor_organization]] assign[=] name[obj].contributor_organization
call[name[self].__dict__][constant[data]] assign[=] name[obj].data
call[name[self].__dict__][constant[annotations]] assign[=] call[name[obj].__dict__][constant[annotations]]
call[name[self].__dict__][constant[sections]] assign[=] call[name[obj].__dict__][constant[sections]] | keyword[def] identifier[_lazy_load] ( identifier[self] ):
literal[string]
identifier[obj] = identifier[self] . identifier[_connection] . identifier[documents] . identifier[get] ( identifier[id] = identifier[self] . identifier[id] )
identifier[self] . identifier[__dict__] [ literal[string] ]= identifier[obj] . identifier[contributor]
identifier[self] . identifier[__dict__] [ literal[string] ]= identifier[obj] . identifier[contributor_organization]
identifier[self] . identifier[__dict__] [ literal[string] ]= identifier[obj] . identifier[data]
identifier[self] . identifier[__dict__] [ literal[string] ]= identifier[obj] . identifier[__dict__] [ literal[string] ]
identifier[self] . identifier[__dict__] [ literal[string] ]= identifier[obj] . identifier[__dict__] [ literal[string] ] | def _lazy_load(self):
"""
Fetch metadata if it was overlooked during the object's creation.
This can happen when you retrieve documents via search, because
the JSON response does not include complete meta data for all
results.
"""
obj = self._connection.documents.get(id=self.id)
self.__dict__['contributor'] = obj.contributor
self.__dict__['contributor_organization'] = obj.contributor_organization
self.__dict__['data'] = obj.data
self.__dict__['annotations'] = obj.__dict__['annotations']
self.__dict__['sections'] = obj.__dict__['sections'] |
def tag(path, name):
    """Change tag associated with directory."""
    result, error = cli_syncthing_adapter.tag(path, name)
    click.echo("%s" % result, err=error)
constant[ Change tag associated with directory. ]
<ast.Tuple object at 0x7da1b236b040> assign[=] call[name[cli_syncthing_adapter].tag, parameter[name[path], name[name]]]
call[name[click].echo, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[output]]]] | keyword[def] identifier[tag] ( identifier[path] , identifier[name] ):
literal[string]
identifier[output] , identifier[err] = identifier[cli_syncthing_adapter] . identifier[tag] ( identifier[path] , identifier[name] )
identifier[click] . identifier[echo] ( literal[string] % identifier[output] , identifier[err] = identifier[err] ) | def tag(path, name):
""" Change tag associated with directory. """
(output, err) = cli_syncthing_adapter.tag(path, name)
click.echo('%s' % output, err=err) |
def _find_template(parameters, index, required=False, notfoundmsg=None):
    """
    Generate a ``.find()`` call for HTMLElement.

    Args:
        parameters (list): Arguments to emit inside ``.find()``.
        index (int): Which element to pick from the ``.find()`` result.
        required (bool, default False): If set, emit the
            :func:`_required_idiom` form instead of a plain index pick.
        notfoundmsg (str, default None): Message forwarded to
            :func:`_required_idiom` when the item is missing.

    Returns:
        str: Python source code, e.g.::

            el = dom.find('<xex>')

            # pick element from list
            el = el[3] if len(el) - 1 >= 3 else None
    """
    # repr() of the list, minus the surrounding brackets, yields a ready-made
    # argument list for the generated .find() call.
    find_code = IND + "el = dom.find(%s)\n\n" % repr(parameters)[1:-1]
    if not required:
        return find_code + _index_idiom("el", index)
    return find_code + _required_idiom(parameters[0], index, notfoundmsg)
constant[
Generate ``.find()`` call for HTMLElement.
Args:
parameters (list): List of parameters for ``.find()``.
index (int): Index of the item you want to get from ``.find()`` call.
required (bool, default False): Use :func:`_required_idiom` to returned
data.
notfoundmsg (str, default None): Message which will be used for
:func:`_required_idiom` if the item is not found.
Returns:
str: Python code.
Live example::
>>> print g._find_template(["<xex>"], 3)
el = dom.find('<xex>')
# pick element from list
el = el[3] if len(el) - 1 >= 3 else None
]
variable[output] assign[=] binary_operation[name[IND] + binary_operation[constant[el = dom.find(%s)
] <ast.Mod object at 0x7da2590d6920> call[call[name[repr], parameter[name[parameters]]]][<ast.Slice object at 0x7da18bcc87c0>]]]
if name[required] begin[:]
return[binary_operation[name[output] + call[name[_required_idiom], parameter[call[name[parameters]][constant[0]], name[index], name[notfoundmsg]]]]]
return[binary_operation[name[output] + call[name[_index_idiom], parameter[constant[el], name[index]]]]] | keyword[def] identifier[_find_template] ( identifier[parameters] , identifier[index] , identifier[required] = keyword[False] , identifier[notfoundmsg] = keyword[None] ):
literal[string]
identifier[output] = identifier[IND] + literal[string] % identifier[repr] ( identifier[parameters] )[ literal[int] :- literal[int] ]
keyword[if] identifier[required] :
keyword[return] identifier[output] + identifier[_required_idiom] ( identifier[parameters] [ literal[int] ], identifier[index] , identifier[notfoundmsg] )
keyword[return] identifier[output] + identifier[_index_idiom] ( literal[string] , identifier[index] ) | def _find_template(parameters, index, required=False, notfoundmsg=None):
"""
Generate ``.find()`` call for HTMLElement.
Args:
parameters (list): List of parameters for ``.find()``.
index (int): Index of the item you want to get from ``.find()`` call.
required (bool, default False): Use :func:`_required_idiom` to returned
data.
notfoundmsg (str, default None): Message which will be used for
:func:`_required_idiom` if the item is not found.
Returns:
str: Python code.
Live example::
>>> print g._find_template(["<xex>"], 3)
el = dom.find('<xex>')
# pick element from list
el = el[3] if len(el) - 1 >= 3 else None
"""
output = IND + 'el = dom.find(%s)\n\n' % repr(parameters)[1:-1]
if required:
return output + _required_idiom(parameters[0], index, notfoundmsg) # depends on [control=['if'], data=[]]
return output + _index_idiom('el', index) |
def search_debit():
    """
    Get one to ten debit(s) for a single User.
    ---
    parameters:
      - name: searchcd
        in: body
        description: The Debit(s) you'd like to get.
        required: false
        schema:
          $ref: '#/definitions/SearchCD'
    responses:
      '200':
        description: the User's debit(s)
        schema:
          items:
            $ref: '#/definitions/Debit'
          type: array
      default:
        description: unexpected error
        schema:
          $ref: '#/definitions/errorModel'
    security:
      - kid: []
      - typ: []
      - alg: []
    operationId: searchDebits
    """
    # Optional filter values carried in the signed JWS payload.
    sid = request.jws_payload['data'].get('id')
    address = request.jws_payload['data'].get('address')
    currency = request.jws_payload['data'].get('currency')
    network = request.jws_payload['data'].get('network')
    #reference = request.jws_payload['data'].get('reference')
    ref_id = request.jws_payload['data'].get('ref_id')
    # Page defaults to 0 when absent or falsy.
    page = request.jws_payload['data'].get('page') or 0
    # Base query: only the authenticated user's debits.
    debsq = ses.query(wm.Debit).filter(wm.Debit.user_id == current_user.id)
    # NOTE(review): a query object is generally always truthy, so this guard
    # presumably never fires; it looks like it meant to test the query
    # *results* -- confirm against the ORM in use.
    if not debsq:
        return None
    # Narrow the query by whichever optional filters were supplied.
    if sid:
        debsq = debsq.filter(wm.Debit.id == sid)
    if address:
        debsq = debsq.filter(wm.Debit.address == address)
    if currency:
        debsq = debsq.filter(wm.Debit.currency == currency)
    if network:
        debsq = debsq.filter(wm.Debit.network == network)
    #if reference:
    #    debsq = debsq.filter(wm.Debit.reference == reference)
    if ref_id:
        debsq = debsq.filter(wm.Debit.ref_id == ref_id)
    # Newest first, at most ten rows per page.
    debsq = debsq.order_by(wm.Debit.time.desc()).limit(10)
    if page and isinstance(page, int):
        debsq = debsq.offset(page * 10)
    # Serialize each row, then wrap the list in a signed bitjws response.
    debits = [json.loads(jsonify2(d, 'Debit')) for d in debsq]
    response = current_app.bitjws.create_response(debits)
    ses.close()
    return response
constant[
Get one to ten debit(s) for a single User.
---
parameters:
- name: searchcd
in: body
description: The Debit(s) you'd like to get.
required: false
schema:
$ref: '#/definitions/SearchCD'
responses:
'200':
description: the User's debit(s)
schema:
items:
$ref: '#/definitions/Debit'
type: array
default:
description: unexpected error
schema:
$ref: '#/definitions/errorModel'
security:
- kid: []
- typ: []
- alg: []
operationId: searchDebits
]
variable[sid] assign[=] call[call[name[request].jws_payload][constant[data]].get, parameter[constant[id]]]
variable[address] assign[=] call[call[name[request].jws_payload][constant[data]].get, parameter[constant[address]]]
variable[currency] assign[=] call[call[name[request].jws_payload][constant[data]].get, parameter[constant[currency]]]
variable[network] assign[=] call[call[name[request].jws_payload][constant[data]].get, parameter[constant[network]]]
variable[ref_id] assign[=] call[call[name[request].jws_payload][constant[data]].get, parameter[constant[ref_id]]]
variable[page] assign[=] <ast.BoolOp object at 0x7da2044c1de0>
variable[debsq] assign[=] call[call[name[ses].query, parameter[name[wm].Debit]].filter, parameter[compare[name[wm].Debit.user_id equal[==] name[current_user].id]]]
if <ast.UnaryOp object at 0x7da2044c1e10> begin[:]
return[constant[None]]
if name[sid] begin[:]
variable[debsq] assign[=] call[name[debsq].filter, parameter[compare[name[wm].Debit.id equal[==] name[sid]]]]
if name[address] begin[:]
variable[debsq] assign[=] call[name[debsq].filter, parameter[compare[name[wm].Debit.address equal[==] name[address]]]]
if name[currency] begin[:]
variable[debsq] assign[=] call[name[debsq].filter, parameter[compare[name[wm].Debit.currency equal[==] name[currency]]]]
if name[network] begin[:]
variable[debsq] assign[=] call[name[debsq].filter, parameter[compare[name[wm].Debit.network equal[==] name[network]]]]
if name[ref_id] begin[:]
variable[debsq] assign[=] call[name[debsq].filter, parameter[compare[name[wm].Debit.ref_id equal[==] name[ref_id]]]]
variable[debsq] assign[=] call[call[name[debsq].order_by, parameter[call[name[wm].Debit.time.desc, parameter[]]]].limit, parameter[constant[10]]]
if <ast.BoolOp object at 0x7da2044c27a0> begin[:]
variable[debsq] assign[=] call[name[debsq].offset, parameter[binary_operation[name[page] * constant[10]]]]
variable[debits] assign[=] <ast.ListComp object at 0x7da2044c0b50>
variable[response] assign[=] call[name[current_app].bitjws.create_response, parameter[name[debits]]]
call[name[ses].close, parameter[]]
return[name[response]] | keyword[def] identifier[search_debit] ():
literal[string]
identifier[sid] = identifier[request] . identifier[jws_payload] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[address] = identifier[request] . identifier[jws_payload] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[currency] = identifier[request] . identifier[jws_payload] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[network] = identifier[request] . identifier[jws_payload] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[ref_id] = identifier[request] . identifier[jws_payload] [ literal[string] ]. identifier[get] ( literal[string] )
identifier[page] = identifier[request] . identifier[jws_payload] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[or] literal[int]
identifier[debsq] = identifier[ses] . identifier[query] ( identifier[wm] . identifier[Debit] ). identifier[filter] ( identifier[wm] . identifier[Debit] . identifier[user_id] == identifier[current_user] . identifier[id] )
keyword[if] keyword[not] identifier[debsq] :
keyword[return] keyword[None]
keyword[if] identifier[sid] :
identifier[debsq] = identifier[debsq] . identifier[filter] ( identifier[wm] . identifier[Debit] . identifier[id] == identifier[sid] )
keyword[if] identifier[address] :
identifier[debsq] = identifier[debsq] . identifier[filter] ( identifier[wm] . identifier[Debit] . identifier[address] == identifier[address] )
keyword[if] identifier[currency] :
identifier[debsq] = identifier[debsq] . identifier[filter] ( identifier[wm] . identifier[Debit] . identifier[currency] == identifier[currency] )
keyword[if] identifier[network] :
identifier[debsq] = identifier[debsq] . identifier[filter] ( identifier[wm] . identifier[Debit] . identifier[network] == identifier[network] )
keyword[if] identifier[ref_id] :
identifier[debsq] = identifier[debsq] . identifier[filter] ( identifier[wm] . identifier[Debit] . identifier[ref_id] == identifier[ref_id] )
identifier[debsq] = identifier[debsq] . identifier[order_by] ( identifier[wm] . identifier[Debit] . identifier[time] . identifier[desc] ()). identifier[limit] ( literal[int] )
keyword[if] identifier[page] keyword[and] identifier[isinstance] ( identifier[page] , identifier[int] ):
identifier[debsq] = identifier[debsq] . identifier[offset] ( identifier[page] * literal[int] )
identifier[debits] =[ identifier[json] . identifier[loads] ( identifier[jsonify2] ( identifier[d] , literal[string] )) keyword[for] identifier[d] keyword[in] identifier[debsq] ]
identifier[response] = identifier[current_app] . identifier[bitjws] . identifier[create_response] ( identifier[debits] )
identifier[ses] . identifier[close] ()
keyword[return] identifier[response] | def search_debit():
"""
Get one to ten debit(s) for a single User.
---
parameters:
- name: searchcd
in: body
description: The Debit(s) you'd like to get.
required: false
schema:
$ref: '#/definitions/SearchCD'
responses:
'200':
description: the User's debit(s)
schema:
items:
$ref: '#/definitions/Debit'
type: array
default:
description: unexpected error
schema:
$ref: '#/definitions/errorModel'
security:
- kid: []
- typ: []
- alg: []
operationId: searchDebits
"""
sid = request.jws_payload['data'].get('id')
address = request.jws_payload['data'].get('address')
currency = request.jws_payload['data'].get('currency')
network = request.jws_payload['data'].get('network')
#reference = request.jws_payload['data'].get('reference')
ref_id = request.jws_payload['data'].get('ref_id')
page = request.jws_payload['data'].get('page') or 0
debsq = ses.query(wm.Debit).filter(wm.Debit.user_id == current_user.id)
if not debsq:
return None # depends on [control=['if'], data=[]]
if sid:
debsq = debsq.filter(wm.Debit.id == sid) # depends on [control=['if'], data=[]]
if address:
debsq = debsq.filter(wm.Debit.address == address) # depends on [control=['if'], data=[]]
if currency:
debsq = debsq.filter(wm.Debit.currency == currency) # depends on [control=['if'], data=[]]
if network:
debsq = debsq.filter(wm.Debit.network == network) # depends on [control=['if'], data=[]]
#if reference:
# debsq = debsq.filter(wm.Debit.reference == reference)
if ref_id:
debsq = debsq.filter(wm.Debit.ref_id == ref_id) # depends on [control=['if'], data=[]]
debsq = debsq.order_by(wm.Debit.time.desc()).limit(10)
if page and isinstance(page, int):
debsq = debsq.offset(page * 10) # depends on [control=['if'], data=[]]
debits = [json.loads(jsonify2(d, 'Debit')) for d in debsq]
response = current_app.bitjws.create_response(debits)
ses.close()
return response |
def manager(self, **kwargs):
    """Return a preference manager that can be used to retrieve preference values.

    The manager is bound to this registry and its preference model; any extra
    keyword arguments are forwarded unchanged to ``PreferencesManager``.
    """
    return PreferencesManager(registry=self, model=self.preference_model, **kwargs)
constant[Return a preference manager that can be used to retrieve preference values]
return[call[name[PreferencesManager], parameter[]]] | keyword[def] identifier[manager] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[PreferencesManager] ( identifier[registry] = identifier[self] , identifier[model] = identifier[self] . identifier[preference_model] ,** identifier[kwargs] ) | def manager(self, **kwargs):
"""Return a preference manager that can be used to retrieve preference values"""
return PreferencesManager(registry=self, model=self.preference_model, **kwargs) |
def debugPreview(self, title="Debug"):
    """ Displays the region in a preview window.

    If the region is a Match, circles the target area. If the region is larger
    than half the primary screen in either dimension, scales it down to half
    size.

    Args:
        title (str): Title for the preview window. (Previously accepted but
            ignored; now forwarded to the image viewer.)
    """
    region = self
    haystack = self.getBitmap()
    if isinstance(region, Match):
        # Highlight the match's click target, translated into this
        # region's local coordinates.
        cv2.circle(
            haystack,
            (region.getTarget().x - self.x, region.getTarget().y - self.y),
            5,
            255)
    # NOTE(review): shape[0] is the image height while getBounds()[2] is
    # presumably the screen width -- confirm the intended axis pairing
    # against Screen.getBounds().
    if haystack.shape[0] > (Screen(0).getBounds()[2]/2) or haystack.shape[1] > (Screen(0).getBounds()[3]/2):
        # Image is bigger than half the screen; scale it down
        haystack = cv2.resize(haystack, (0, 0), fx=0.5, fy=0.5)
    # Fix: actually use the ``title`` argument (some viewers may ignore it).
    Image.fromarray(haystack).show(title=title)
constant[ Displays the region in a preview window.
If the region is a Match, circles the target area. If the region is larger than half the
primary screen in either dimension, scales it down to half size.
]
variable[region] assign[=] name[self]
variable[haystack] assign[=] call[name[self].getBitmap, parameter[]]
if call[name[isinstance], parameter[name[region], name[Match]]] begin[:]
call[name[cv2].circle, parameter[name[haystack], tuple[[<ast.BinOp object at 0x7da18dc9b130>, <ast.BinOp object at 0x7da18dc999f0>]], constant[5], constant[255]]]
if <ast.BoolOp object at 0x7da18dc9bc10> begin[:]
variable[haystack] assign[=] call[name[cv2].resize, parameter[name[haystack], tuple[[<ast.Constant object at 0x7da18dc99a50>, <ast.Constant object at 0x7da18dc98550>]]]]
call[call[name[Image].fromarray, parameter[name[haystack]]].show, parameter[]] | keyword[def] identifier[debugPreview] ( identifier[self] , identifier[title] = literal[string] ):
literal[string]
identifier[region] = identifier[self]
identifier[haystack] = identifier[self] . identifier[getBitmap] ()
keyword[if] identifier[isinstance] ( identifier[region] , identifier[Match] ):
identifier[cv2] . identifier[circle] (
identifier[haystack] ,
( identifier[region] . identifier[getTarget] (). identifier[x] - identifier[self] . identifier[x] , identifier[region] . identifier[getTarget] (). identifier[y] - identifier[self] . identifier[y] ),
literal[int] ,
literal[int] )
keyword[if] identifier[haystack] . identifier[shape] [ literal[int] ]>( identifier[Screen] ( literal[int] ). identifier[getBounds] ()[ literal[int] ]/ literal[int] ) keyword[or] identifier[haystack] . identifier[shape] [ literal[int] ]>( identifier[Screen] ( literal[int] ). identifier[getBounds] ()[ literal[int] ]/ literal[int] ):
identifier[haystack] = identifier[cv2] . identifier[resize] ( identifier[haystack] ,( literal[int] , literal[int] ), identifier[fx] = literal[int] , identifier[fy] = literal[int] )
identifier[Image] . identifier[fromarray] ( identifier[haystack] ). identifier[show] () | def debugPreview(self, title='Debug'):
""" Displays the region in a preview window.
If the region is a Match, circles the target area. If the region is larger than half the
primary screen in either dimension, scales it down to half size.
"""
region = self
haystack = self.getBitmap()
if isinstance(region, Match):
cv2.circle(haystack, (region.getTarget().x - self.x, region.getTarget().y - self.y), 5, 255) # depends on [control=['if'], data=[]]
if haystack.shape[0] > Screen(0).getBounds()[2] / 2 or haystack.shape[1] > Screen(0).getBounds()[3] / 2:
# Image is bigger than half the screen; scale it down
haystack = cv2.resize(haystack, (0, 0), fx=0.5, fy=0.5) # depends on [control=['if'], data=[]]
Image.fromarray(haystack).show() |
def urlopen(self):
    """
    Grabs readable PNG file pointer
    """
    png_request = Request(str(self))
    try:
        response = urlopen(png_request)
    except HTTPError:
        _print('The server couldn\'t fulfill the request.')
    except URLError:
        _print('We failed to reach a server.')
    else:
        return response
constant[
Grabs readable PNG file pointer
]
variable[req] assign[=] call[name[Request], parameter[call[name[str], parameter[name[self]]]]]
<ast.Try object at 0x7da1b021d360> | keyword[def] identifier[urlopen] ( identifier[self] ):
literal[string]
identifier[req] = identifier[Request] ( identifier[str] ( identifier[self] ))
keyword[try] :
keyword[return] identifier[urlopen] ( identifier[req] )
keyword[except] identifier[HTTPError] :
identifier[_print] ( literal[string] )
keyword[except] identifier[URLError] :
identifier[_print] ( literal[string] ) | def urlopen(self):
"""
Grabs readable PNG file pointer
"""
req = Request(str(self))
try:
return urlopen(req) # depends on [control=['try'], data=[]]
except HTTPError:
_print("The server couldn't fulfill the request.") # depends on [control=['except'], data=[]]
except URLError:
_print('We failed to reach a server.') # depends on [control=['except'], data=[]] |
def save(self, filename=None):
    """Save the document to file.

    Arguments:
        * filename (str): The filename to save to. If not set (``None``,
          default), saves to the same file as loaded from. Extensions
          ``.bz2`` and ``.gz`` (case-insensitive) select compressed output;
          anything else is written as plain UTF-8 text.

    Raises:
        Exception: If no filename is given and none was stored on the
            document.
    """
    if not filename:
        filename = self.filename
    if not filename:
        raise Exception("No filename specified")
    lowered = filename.lower()
    # Use context managers so the file handle is closed even when
    # serialization or the write itself fails (the original leaked the
    # handle on error).
    if lowered.endswith('.bz2'):
        with bz2.BZ2File(filename, 'wb') as f:
            f.write(self.xmlstring().encode('utf-8'))
    elif lowered.endswith('.gz'):
        with gzip.GzipFile(filename, 'wb') as f:
            f.write(self.xmlstring().encode('utf-8'))
    else:
        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(self.xmlstring())
constant[Save the document to file.
Arguments:
* filename (str): The filename to save to. If not set (``None``, default), saves to the same file as loaded from.
]
if <ast.UnaryOp object at 0x7da204344a00> begin[:]
variable[filename] assign[=] name[self].filename
if <ast.UnaryOp object at 0x7da204346770> begin[:]
<ast.Raise object at 0x7da204346cb0>
if compare[call[call[name[filename]][<ast.Slice object at 0x7da204345150>].lower, parameter[]] equal[==] constant[.bz2]] begin[:]
variable[f] assign[=] call[name[bz2].BZ2File, parameter[name[filename], constant[wb]]]
call[name[f].write, parameter[call[call[name[self].xmlstring, parameter[]].encode, parameter[constant[utf-8]]]]]
call[name[f].close, parameter[]] | keyword[def] identifier[save] ( identifier[self] , identifier[filename] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[filename] :
identifier[filename] = identifier[self] . identifier[filename]
keyword[if] keyword[not] identifier[filename] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[filename] [- literal[int] :]. identifier[lower] ()== literal[string] :
identifier[f] = identifier[bz2] . identifier[BZ2File] ( identifier[filename] , literal[string] )
identifier[f] . identifier[write] ( identifier[self] . identifier[xmlstring] (). identifier[encode] ( literal[string] ))
identifier[f] . identifier[close] ()
keyword[elif] identifier[filename] [- literal[int] :]. identifier[lower] ()== literal[string] :
identifier[f] = identifier[gzip] . identifier[GzipFile] ( identifier[filename] , literal[string] )
identifier[f] . identifier[write] ( identifier[self] . identifier[xmlstring] (). identifier[encode] ( literal[string] ))
identifier[f] . identifier[close] ()
keyword[else] :
identifier[f] = identifier[io] . identifier[open] ( identifier[filename] , literal[string] , identifier[encoding] = literal[string] )
identifier[f] . identifier[write] ( identifier[self] . identifier[xmlstring] ())
identifier[f] . identifier[close] () | def save(self, filename=None):
"""Save the document to file.
Arguments:
* filename (str): The filename to save to. If not set (``None``, default), saves to the same file as loaded from.
"""
if not filename:
filename = self.filename # depends on [control=['if'], data=[]]
if not filename:
raise Exception('No filename specified') # depends on [control=['if'], data=[]]
if filename[-4:].lower() == '.bz2':
f = bz2.BZ2File(filename, 'wb')
f.write(self.xmlstring().encode('utf-8'))
f.close() # depends on [control=['if'], data=[]]
elif filename[-3:].lower() == '.gz':
f = gzip.GzipFile(filename, 'wb') #pylint: disable=redefined-variable-type
f.write(self.xmlstring().encode('utf-8'))
f.close() # depends on [control=['if'], data=[]]
else:
f = io.open(filename, 'w', encoding='utf-8')
f.write(self.xmlstring())
f.close() |
def on_displayed(self, callback, remove=False):
    """(Un)Register a widget displayed callback.

    Parameters
    ----------
    callback: method handler
        Must have a signature of::

            callback(widget, **kwargs)

        kwargs from display are passed through without modification.
    remove: bool
        True if the callback should be unregistered.
    """
    # Delegate (de)registration to the shared display-callback dispatcher.
    self._display_callbacks.register_callback(callback, remove=remove)
constant[(Un)Register a widget displayed callback.
Parameters
----------
callback: method handler
Must have a signature of::
callback(widget, **kwargs)
kwargs from display are passed through without modification.
remove: bool
True if the callback should be unregistered.]
call[name[self]._display_callbacks.register_callback, parameter[name[callback]]] | keyword[def] identifier[on_displayed] ( identifier[self] , identifier[callback] , identifier[remove] = keyword[False] ):
literal[string]
identifier[self] . identifier[_display_callbacks] . identifier[register_callback] ( identifier[callback] , identifier[remove] = identifier[remove] ) | def on_displayed(self, callback, remove=False):
"""(Un)Register a widget displayed callback.
Parameters
----------
callback: method handler
Must have a signature of::
callback(widget, **kwargs)
kwargs from display are passed through without modification.
remove: bool
True if the callback should be unregistered."""
self._display_callbacks.register_callback(callback, remove=remove) |
def _convert_xml_to_retention_policy(xml, retention_policy):
    '''
    Populate *retention_policy* from an XML fragment of the form::

        <Enabled>true|false</Enabled>
        <Days>number-of-days</Days>
    '''
    # <Enabled> is always read; <Days> is optional and only set when present.
    retention_policy.enabled = _bool(xml.find('Enabled').text)
    days = xml.find('Days')
    if days is None:
        return
    retention_policy.days = int(days.text)
constant[
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
]
name[retention_policy].enabled assign[=] call[name[_bool], parameter[call[name[xml].find, parameter[constant[Enabled]]].text]]
variable[days_element] assign[=] call[name[xml].find, parameter[constant[Days]]]
if compare[name[days_element] is_not constant[None]] begin[:]
name[retention_policy].days assign[=] call[name[int], parameter[name[days_element].text]] | keyword[def] identifier[_convert_xml_to_retention_policy] ( identifier[xml] , identifier[retention_policy] ):
literal[string]
identifier[retention_policy] . identifier[enabled] = identifier[_bool] ( identifier[xml] . identifier[find] ( literal[string] ). identifier[text] )
identifier[days_element] = identifier[xml] . identifier[find] ( literal[string] )
keyword[if] identifier[days_element] keyword[is] keyword[not] keyword[None] :
identifier[retention_policy] . identifier[days] = identifier[int] ( identifier[days_element] . identifier[text] ) | def _convert_xml_to_retention_policy(xml, retention_policy):
"""
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
"""
# Enabled
retention_policy.enabled = _bool(xml.find('Enabled').text)
# Days
days_element = xml.find('Days')
if days_element is not None:
retention_policy.days = int(days_element.text) # depends on [control=['if'], data=['days_element']] |
def travis_build_package():
    """Assumed called on Travis, to prepare a package to be deployed.

    This method prints on stdout for Travis.

    Returns:
        An object suitable to pass to ``sys.exit()`` directly: ``None`` on
        success, or a failure-message string otherwise.
    """
    travis_tag = os.environ.get("TRAVIS_TAG")
    if not travis_tag:
        print("TRAVIS_TAG environment variable is not present")
        return "TRAVIS_TAG environment variable is not present"
    try:
        version = Version(travis_tag)
    except InvalidVersion:
        # Bug fix: the original formatted ``version`` here, but ``version``
        # is never bound when Version() raises (UnboundLocalError). Report
        # the offending tag instead.
        failure = "Version must be a valid PEP440 version (version is: {})".format(travis_tag)
        print(failure)
        return failure
    abs_dist_path = Path(os.environ["TRAVIS_BUILD_DIR"], "dist")
    # Plain loop: the old list comprehension was used only for its
    # side effects.
    for package in package_list:
        create_package(package, text_type(abs_dist_path))
    print("Produced:\n{}".format(list(abs_dist_path.glob("*"))))
    pattern = "*{}*".format(version)
    packages = list(abs_dist_path.glob(pattern))
    if not packages:
        return "Package version does not match tag {}, abort".format(version)
    pypi_server = os.environ.get("PYPI_SERVER", "default PyPI server")
    print("Package created as expected and will be pushed to {}".format(pypi_server))
constant[Assumed called on Travis, to prepare a package to be deployed
This method prints on stdout for Travis.
Return is obj to pass to sys.exit() directly
]
variable[travis_tag] assign[=] call[name[os].environ.get, parameter[constant[TRAVIS_TAG]]]
if <ast.UnaryOp object at 0x7da204621540> begin[:]
call[name[print], parameter[constant[TRAVIS_TAG environment variable is not present]]]
return[constant[TRAVIS_TAG environment variable is not present]]
<ast.Try object at 0x7da204621e40>
variable[abs_dist_path] assign[=] call[name[Path], parameter[call[name[os].environ][constant[TRAVIS_BUILD_DIR]], constant[dist]]]
<ast.ListComp object at 0x7da2046234c0>
call[name[print], parameter[call[constant[Produced:
{}].format, parameter[call[name[list], parameter[call[name[abs_dist_path].glob, parameter[constant[*]]]]]]]]]
variable[pattern] assign[=] call[constant[*{}*].format, parameter[name[version]]]
variable[packages] assign[=] call[name[list], parameter[call[name[abs_dist_path].glob, parameter[name[pattern]]]]]
if <ast.UnaryOp object at 0x7da2046236a0> begin[:]
return[call[constant[Package version does not match tag {}, abort].format, parameter[name[version]]]]
variable[pypi_server] assign[=] call[name[os].environ.get, parameter[constant[PYPI_SERVER], constant[default PyPI server]]]
call[name[print], parameter[call[constant[Package created as expected and will be pushed to {}].format, parameter[name[pypi_server]]]]] | keyword[def] identifier[travis_build_package] ():
literal[string]
identifier[travis_tag] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[travis_tag] :
identifier[print] ( literal[string] )
keyword[return] literal[string]
keyword[try] :
identifier[version] = identifier[Version] ( identifier[travis_tag] )
keyword[except] identifier[InvalidVersion] :
identifier[failure] = literal[string] . identifier[format] ( identifier[version] )
identifier[print] ( identifier[failure] )
keyword[return] identifier[failure]
identifier[abs_dist_path] = identifier[Path] ( identifier[os] . identifier[environ] [ literal[string] ], literal[string] )
[ identifier[create_package] ( identifier[package] , identifier[text_type] ( identifier[abs_dist_path] )) keyword[for] identifier[package] keyword[in] identifier[package_list] ]
identifier[print] ( literal[string] . identifier[format] ( identifier[list] ( identifier[abs_dist_path] . identifier[glob] ( literal[string] ))))
identifier[pattern] = literal[string] . identifier[format] ( identifier[version] )
identifier[packages] = identifier[list] ( identifier[abs_dist_path] . identifier[glob] ( identifier[pattern] ))
keyword[if] keyword[not] identifier[packages] :
keyword[return] literal[string] . identifier[format] ( identifier[version] )
identifier[pypi_server] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[pypi_server] )) | def travis_build_package():
"""Assumed called on Travis, to prepare a package to be deployed
This method prints on stdout for Travis.
Return is obj to pass to sys.exit() directly
"""
travis_tag = os.environ.get('TRAVIS_TAG')
if not travis_tag:
print('TRAVIS_TAG environment variable is not present')
return 'TRAVIS_TAG environment variable is not present' # depends on [control=['if'], data=[]]
try:
version = Version(travis_tag) # depends on [control=['try'], data=[]]
except InvalidVersion:
failure = 'Version must be a valid PEP440 version (version is: {})'.format(version)
print(failure)
return failure # depends on [control=['except'], data=[]]
abs_dist_path = Path(os.environ['TRAVIS_BUILD_DIR'], 'dist')
[create_package(package, text_type(abs_dist_path)) for package in package_list]
print('Produced:\n{}'.format(list(abs_dist_path.glob('*'))))
pattern = '*{}*'.format(version)
packages = list(abs_dist_path.glob(pattern))
if not packages:
return 'Package version does not match tag {}, abort'.format(version) # depends on [control=['if'], data=[]]
pypi_server = os.environ.get('PYPI_SERVER', 'default PyPI server')
print('Package created as expected and will be pushed to {}'.format(pypi_server)) |
async def parse(self, request):
    """Return a coroutine which parses data from request depends on content-type.

    Usage: ::

        def post(self, request):
            data = await self.parse(request)
            # ...
    """
    content_type = request.content_type
    if content_type == 'application/json':
        return await request.json()
    form_types = ('application/x-www-form-urlencoded', 'multipart/form-data')
    if content_type in form_types:
        return await request.post()
    # Fall back to the raw request body as text.
    return await request.text()
literal[string]
keyword[if] identifier[request] . identifier[content_type] keyword[in] { literal[string] , literal[string] }:
keyword[return] keyword[await] identifier[request] . identifier[post] ()
keyword[if] identifier[request] . identifier[content_type] == literal[string] :
keyword[return] keyword[await] identifier[request] . identifier[json] ()
keyword[return] keyword[await] identifier[request] . identifier[text] () | async def parse(self, request):
"""Return a coroutine which parses data from request depends on content-type.
Usage: ::
def post(self, request):
data = await self.parse(request)
# ...
"""
if request.content_type in {'application/x-www-form-urlencoded', 'multipart/form-data'}:
return await request.post() # depends on [control=['if'], data=[]]
if request.content_type == 'application/json':
return await request.json() # depends on [control=['if'], data=[]]
return await request.text() |
def _contains_cftime_datetimes(array) -> bool:
"""Check if an array contains cftime.datetime objects
"""
try:
from cftime import datetime as cftime_datetime
except ImportError:
return False
else:
if array.dtype == np.dtype('O') and array.size > 0:
sample = array.ravel()[0]
if isinstance(sample, dask_array_type):
sample = sample.compute()
if isinstance(sample, np.ndarray):
sample = sample.item()
return isinstance(sample, cftime_datetime)
else:
return False | def function[_contains_cftime_datetimes, parameter[array]]:
constant[Check if an array contains cftime.datetime objects
]
<ast.Try object at 0x7da18ede68c0> | keyword[def] identifier[_contains_cftime_datetimes] ( identifier[array] )-> identifier[bool] :
literal[string]
keyword[try] :
keyword[from] identifier[cftime] keyword[import] identifier[datetime] keyword[as] identifier[cftime_datetime]
keyword[except] identifier[ImportError] :
keyword[return] keyword[False]
keyword[else] :
keyword[if] identifier[array] . identifier[dtype] == identifier[np] . identifier[dtype] ( literal[string] ) keyword[and] identifier[array] . identifier[size] > literal[int] :
identifier[sample] = identifier[array] . identifier[ravel] ()[ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[sample] , identifier[dask_array_type] ):
identifier[sample] = identifier[sample] . identifier[compute] ()
keyword[if] identifier[isinstance] ( identifier[sample] , identifier[np] . identifier[ndarray] ):
identifier[sample] = identifier[sample] . identifier[item] ()
keyword[return] identifier[isinstance] ( identifier[sample] , identifier[cftime_datetime] )
keyword[else] :
keyword[return] keyword[False] | def _contains_cftime_datetimes(array) -> bool:
"""Check if an array contains cftime.datetime objects
"""
try:
from cftime import datetime as cftime_datetime # depends on [control=['try'], data=[]]
except ImportError:
return False # depends on [control=['except'], data=[]]
else:
if array.dtype == np.dtype('O') and array.size > 0:
sample = array.ravel()[0]
if isinstance(sample, dask_array_type):
sample = sample.compute()
if isinstance(sample, np.ndarray):
sample = sample.item() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return isinstance(sample, cftime_datetime) # depends on [control=['if'], data=[]]
else:
return False |
def expect_lit(char, buf, pos):
    """Expect a literal character at the current buffer position.

    Returns ``(char, pos + 1)`` on a match, or ``(None, len(buf))`` when
    the position is out of range or holds a different character.
    """
    if pos < len(buf) and buf[pos] == char:
        return char, pos + 1
    return None, len(buf)
constant[Expect a literal character at the current buffer position.]
if <ast.BoolOp object at 0x7da1b02e6500> begin[:]
return[tuple[[<ast.Constant object at 0x7da1b0275300>, <ast.Call object at 0x7da1b0275900>]]]
return[tuple[[<ast.Name object at 0x7da1b0274e80>, <ast.BinOp object at 0x7da1b0274c70>]]] | keyword[def] identifier[expect_lit] ( identifier[char] , identifier[buf] , identifier[pos] ):
literal[string]
keyword[if] identifier[pos] >= identifier[len] ( identifier[buf] ) keyword[or] identifier[buf] [ identifier[pos] ]!= identifier[char] :
keyword[return] keyword[None] , identifier[len] ( identifier[buf] )
keyword[return] identifier[char] , identifier[pos] + literal[int] | def expect_lit(char, buf, pos):
"""Expect a literal character at the current buffer position."""
if pos >= len(buf) or buf[pos] != char:
return (None, len(buf)) # depends on [control=['if'], data=[]]
return (char, pos + 1) |
def lesser(lhs, rhs):
    """Element-wise **lesser than** (<) comparison with broadcasting.

    For each pair of broadcast elements, returns 1 (true) when the ``lhs``
    element is smaller than the ``rhs`` element, and 0 (false) otherwise.
    Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable when each pair of corresponding
       dimensions either matches or has size 1 on one side.

    Parameters
    ----------
    lhs : scalar or mxnet.ndarray.array
        First array to be compared.
    rhs : scalar or mxnet.ndarray.array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of boolean (0/1) values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> mx.nd.lesser(x, y).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 0.,  0.,  0.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    def _scalar_lesser(x, y):
        # Pure-Python fallback used when both operands are scalars.
        return 1 if x < y else 0
    # When exactly one operand is a scalar: _lesser_scalar handles
    # "array < scalar"; the mirrored "scalar < array" case is the same as
    # "array > scalar", hence _greater_scalar.
    return _ufunc_helper(
        lhs,
        rhs,
        op.broadcast_lesser,
        _scalar_lesser,
        _internal._lesser_scalar,
        _internal._greater_scalar)
constant[Returns the result of element-wise **lesser than** (<) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are less than rhs,
otherwise return 0(false).
Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x < 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x < y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.lesser(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (z < y).asnumpy()
array([[ 0., 0.],
[ 1., 0.]], dtype=float32)
]
return[call[name[_ufunc_helper], parameter[name[lhs], name[rhs], name[op].broadcast_lesser, <ast.Lambda object at 0x7da1b200bdf0>, name[_internal]._lesser_scalar, name[_internal]._greater_scalar]]] | keyword[def] identifier[lesser] ( identifier[lhs] , identifier[rhs] ):
literal[string]
keyword[return] identifier[_ufunc_helper] (
identifier[lhs] ,
identifier[rhs] ,
identifier[op] . identifier[broadcast_lesser] ,
keyword[lambda] identifier[x] , identifier[y] : literal[int] keyword[if] identifier[x] < identifier[y] keyword[else] literal[int] ,
identifier[_internal] . identifier[_lesser_scalar] ,
identifier[_internal] . identifier[_greater_scalar] ) | def lesser(lhs, rhs):
"""Returns the result of element-wise **lesser than** (<) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are less than rhs,
otherwise return 0(false).
Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x < 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x < y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.lesser(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (z < y).asnumpy()
array([[ 0., 0.],
[ 1., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(lhs, rhs, op.broadcast_lesser, lambda x, y: 1 if x < y else 0, _internal._lesser_scalar, _internal._greater_scalar) |
def getService(self, name, auto_execute=True):
    """
    Returns a L{ServiceProxy} for the supplied name. Sets up an object that
    can have method calls made to it that build the AMF requests.
    @rtype: L{ServiceProxy}
    """
    # Success path first; any non-string name is rejected.
    if isinstance(name, basestring):
        return ServiceProxy(self, name, auto_execute)
    raise TypeError('string type required')
constant[
Returns a L{ServiceProxy} for the supplied name. Sets up an object that
can have method calls made to it that build the AMF requests.
@rtype: L{ServiceProxy}
]
if <ast.UnaryOp object at 0x7da18f722770> begin[:]
<ast.Raise object at 0x7da1b1435ea0>
return[call[name[ServiceProxy], parameter[name[self], name[name], name[auto_execute]]]] | keyword[def] identifier[getService] ( identifier[self] , identifier[name] , identifier[auto_execute] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[ServiceProxy] ( identifier[self] , identifier[name] , identifier[auto_execute] ) | def getService(self, name, auto_execute=True):
"""
Returns a L{ServiceProxy} for the supplied name. Sets up an object that
can have method calls made to it that build the AMF requests.
@rtype: L{ServiceProxy}
"""
if not isinstance(name, basestring):
raise TypeError('string type required') # depends on [control=['if'], data=[]]
return ServiceProxy(self, name, auto_execute) |
def get_rot(slab):
    """
    Gets the transformation to rotate the z axis into the miller index
    """
    new_z = get_mi_vec(slab)
    a, _, _ = slab.lattice.matrix
    new_x = a / np.linalg.norm(a)
    new_y = np.cross(new_z, new_x)
    # rot[i][j] = dot(e_i, new_j); transposing yields the rotation that
    # maps the standard basis onto (new_x, new_y, new_z).
    old_axes = np.eye(3)
    new_axes = (new_x, new_y, new_z)
    rot_matrix = np.array([[np.dot(old, new) for new in new_axes]
                           for old in old_axes]).T
    return SymmOp.from_rotation_and_translation(rot_matrix)
constant[
Gets the transformation to rotate the z axis into the miller index
]
variable[new_z] assign[=] call[name[get_mi_vec], parameter[name[slab]]]
<ast.Tuple object at 0x7da204347310> assign[=] name[slab].lattice.matrix
variable[new_x] assign[=] binary_operation[name[a] / call[name[np].linalg.norm, parameter[name[a]]]]
variable[new_y] assign[=] call[name[np].cross, parameter[name[new_z], name[new_x]]]
<ast.Tuple object at 0x7da2043462c0> assign[=] call[name[np].eye, parameter[constant[3]]]
variable[rot_matrix] assign[=] call[call[name[np].array, parameter[<ast.ListComp object at 0x7da204346b00>]].reshape, parameter[constant[3], constant[3]]]
variable[rot_matrix] assign[=] call[name[np].transpose, parameter[name[rot_matrix]]]
variable[sop] assign[=] call[name[SymmOp].from_rotation_and_translation, parameter[name[rot_matrix]]]
return[name[sop]] | keyword[def] identifier[get_rot] ( identifier[slab] ):
literal[string]
identifier[new_z] = identifier[get_mi_vec] ( identifier[slab] )
identifier[a] , identifier[b] , identifier[c] = identifier[slab] . identifier[lattice] . identifier[matrix]
identifier[new_x] = identifier[a] / identifier[np] . identifier[linalg] . identifier[norm] ( identifier[a] )
identifier[new_y] = identifier[np] . identifier[cross] ( identifier[new_z] , identifier[new_x] )
identifier[x] , identifier[y] , identifier[z] = identifier[np] . identifier[eye] ( literal[int] )
identifier[rot_matrix] = identifier[np] . identifier[array] ([ identifier[np] . identifier[dot] (* identifier[el] ) keyword[for] identifier[el] keyword[in]
identifier[itertools] . identifier[product] ([ identifier[x] , identifier[y] , identifier[z] ],
[ identifier[new_x] , identifier[new_y] , identifier[new_z] ])]). identifier[reshape] ( literal[int] , literal[int] )
identifier[rot_matrix] = identifier[np] . identifier[transpose] ( identifier[rot_matrix] )
identifier[sop] = identifier[SymmOp] . identifier[from_rotation_and_translation] ( identifier[rot_matrix] )
keyword[return] identifier[sop] | def get_rot(slab):
"""
Gets the transformation to rotate the z axis into the miller index
"""
new_z = get_mi_vec(slab)
(a, b, c) = slab.lattice.matrix
new_x = a / np.linalg.norm(a)
new_y = np.cross(new_z, new_x)
(x, y, z) = np.eye(3)
rot_matrix = np.array([np.dot(*el) for el in itertools.product([x, y, z], [new_x, new_y, new_z])]).reshape(3, 3)
rot_matrix = np.transpose(rot_matrix)
sop = SymmOp.from_rotation_and_translation(rot_matrix)
return sop |
def get_published_events(self, process=True) -> List[Event]:
        """Get a list of published (pending) events.
        Return a list of Event objects which have been published
        and are therefore pending to be processed. If the process argument
        is set to true, any events returned from this method will also be
        marked as processed by moving them to the processed events queue.
        This method is intended to be used either to print the list of
        pending published events, or also to recover from events
        missed by the get() method. The latter of these use cases may be needed
        for recovering when a subscriber drops out.
        Args:
            process (bool): If true, also move the events to the Processed
                event queue.
        Return:
            list[Events], list of Event objects
        """
        LOG.debug('Getting published events (%s)', self._pub_key)
        if process:
            LOG.debug('Marking returned published events as processed.')
            # NOTE(review): watch + pipeline here looks like a Redis-style
            # WATCH/MULTI transaction, so the read-then-move below is atomic
            # with respect to concurrent publishers -- confirm DB semantics.
            DB.watch(self._pub_key, pipeline=True)
            event_ids = DB.get_list(self._pub_key, pipeline=True)
            if event_ids:
                # Drain the published queue and move every pending id onto
                # the processed queue in the same transaction.
                DB.delete(self._pub_key, pipeline=True)
                DB.append_to_list(self._processed_key, *event_ids,
                                  pipeline=True)
            DB.execute()
        else:
            # Read-only peek: the published queue is left untouched.
            event_ids = DB.get_list(self._pub_key)
        events = []
        # Ids are iterated in reverse -- presumably the list is stored
        # newest-first and callers expect publication order; verify against
        # DB.get_list ordering.
        for event_id in event_ids[::-1]:
            # Payloads are stored as the str() of a dict; literal_eval
            # rebuilds the dict without executing arbitrary code.
            event_str = DB.get_hash_value(self._data_key, event_id)
            event_dict = ast.literal_eval(event_str)
            event_dict['id'] = event_id
            event = Event.from_config(event_dict)
            LOG.debug('Loaded event: %s (%s)', event.id, event.type)
            events.append(event)
return events | def function[get_published_events, parameter[self, process]]:
constant[Get a list of published (pending) events.
Return a list of Event objects which have been published
and are therefore pending to be processed. If the process argument
is set to true, any events returned from this method will also be
marked as processed by moving them to the processed events queue.
This method is intended to be used either to print the list of
pending published events, or also to recover from events
missed by the get() method. The latter of these use cases may be needed
for recovering when a subscriber drops out.
Args:
process (bool): If true, also move the events to the Processed
event queue.
Return:
list[Events], list of Event objects
]
call[name[LOG].debug, parameter[constant[Getting published events (%s)], name[self]._pub_key]]
if name[process] begin[:]
call[name[LOG].debug, parameter[constant[Marking returned published events as processed.]]]
call[name[DB].watch, parameter[name[self]._pub_key]]
variable[event_ids] assign[=] call[name[DB].get_list, parameter[name[self]._pub_key]]
if name[event_ids] begin[:]
call[name[DB].delete, parameter[name[self]._pub_key]]
call[name[DB].append_to_list, parameter[name[self]._processed_key, <ast.Starred object at 0x7da18bc70e20>]]
call[name[DB].execute, parameter[]]
variable[events] assign[=] list[[]]
for taget[name[event_id]] in starred[call[name[event_ids]][<ast.Slice object at 0x7da18bc706a0>]] begin[:]
variable[event_str] assign[=] call[name[DB].get_hash_value, parameter[name[self]._data_key, name[event_id]]]
variable[event_dict] assign[=] call[name[ast].literal_eval, parameter[name[event_str]]]
call[name[event_dict]][constant[id]] assign[=] name[event_id]
variable[event] assign[=] call[name[Event].from_config, parameter[name[event_dict]]]
call[name[LOG].debug, parameter[constant[Loaded event: %s (%s)], name[event].id, name[event].type]]
call[name[events].append, parameter[name[event]]]
return[name[events]] | keyword[def] identifier[get_published_events] ( identifier[self] , identifier[process] = keyword[True] )-> identifier[List] [ identifier[Event] ]:
literal[string]
identifier[LOG] . identifier[debug] ( literal[string] , identifier[self] . identifier[_pub_key] )
keyword[if] identifier[process] :
identifier[LOG] . identifier[debug] ( literal[string] )
identifier[DB] . identifier[watch] ( identifier[self] . identifier[_pub_key] , identifier[pipeline] = keyword[True] )
identifier[event_ids] = identifier[DB] . identifier[get_list] ( identifier[self] . identifier[_pub_key] , identifier[pipeline] = keyword[True] )
keyword[if] identifier[event_ids] :
identifier[DB] . identifier[delete] ( identifier[self] . identifier[_pub_key] , identifier[pipeline] = keyword[True] )
identifier[DB] . identifier[append_to_list] ( identifier[self] . identifier[_processed_key] ,* identifier[event_ids] ,
identifier[pipeline] = keyword[True] )
identifier[DB] . identifier[execute] ()
keyword[else] :
identifier[event_ids] = identifier[DB] . identifier[get_list] ( identifier[self] . identifier[_pub_key] )
identifier[events] =[]
keyword[for] identifier[event_id] keyword[in] identifier[event_ids] [::- literal[int] ]:
identifier[event_str] = identifier[DB] . identifier[get_hash_value] ( identifier[self] . identifier[_data_key] , identifier[event_id] )
identifier[event_dict] = identifier[ast] . identifier[literal_eval] ( identifier[event_str] )
identifier[event_dict] [ literal[string] ]= identifier[event_id]
identifier[event] = identifier[Event] . identifier[from_config] ( identifier[event_dict] )
identifier[LOG] . identifier[debug] ( literal[string] , identifier[event] . identifier[id] , identifier[event] . identifier[type] )
identifier[events] . identifier[append] ( identifier[event] )
keyword[return] identifier[events] | def get_published_events(self, process=True) -> List[Event]:
"""Get a list of published (pending) events.
Return a list of Event objects which have been published
and are therefore pending to be processed. If the process argument
is set to true, any events returned from this method will also be
marked as processed by moving them to the processed events queue.
This method is intended to be used either to print the list of
pending published events, or also to recover from events
missed by the get() method. The latter of these use cases may be needed
for recovering when a subscriber drops out.
Args:
process (bool): If true, also move the events to the Processed
event queue.
Return:
list[Events], list of Event objects
"""
LOG.debug('Getting published events (%s)', self._pub_key)
if process:
LOG.debug('Marking returned published events as processed.')
DB.watch(self._pub_key, pipeline=True)
event_ids = DB.get_list(self._pub_key, pipeline=True)
if event_ids:
DB.delete(self._pub_key, pipeline=True)
DB.append_to_list(self._processed_key, *event_ids, pipeline=True) # depends on [control=['if'], data=[]]
DB.execute() # depends on [control=['if'], data=[]]
else:
event_ids = DB.get_list(self._pub_key)
events = []
for event_id in event_ids[::-1]:
event_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_str)
event_dict['id'] = event_id
event = Event.from_config(event_dict)
LOG.debug('Loaded event: %s (%s)', event.id, event.type)
events.append(event) # depends on [control=['for'], data=['event_id']]
return events |
def _imm_default_init(self, *args, **kwargs):
    '''
    An immutable's default initialization function: accept any number of
    dictionaries followed by any number of keyword args and turn them all
    into the parameters (attributes) of the immutable that is being created.

    Later dictionaries -- and finally the keyword arguments -- override
    earlier ones on key collisions.
    '''
    merged = {}
    for dct in args + (kwargs,):
        merged.update(dct)
    # BUG FIX: the original comprehension did `for (k, v) in dct`, which
    # iterates a dict's *keys* and tries to unpack each key as a pair --
    # raising ValueError/TypeError for ordinary string keys. Merging with
    # dict.update and iterating items() implements the documented intent.
    for k, v in merged.items():
        setattr(self, k, v)
constant[
An immutable's defalt initialization function is to accept any number of dictionaries followed
by any number of keyword args and to turn them all into the parameters of the immutable that is
being created.
]
for taget[tuple[[<ast.Name object at 0x7da1b2298610>, <ast.Name object at 0x7da1b22989d0>]]] in starred[call[name[six].iteritems, parameter[<ast.DictComp object at 0x7da1b2298100>]]] begin[:]
call[name[setattr], parameter[name[self], name[k], name[v]]] | keyword[def] identifier[_imm_default_init] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[six] . identifier[iteritems] ({ identifier[k] : identifier[v] keyword[for] identifier[dct] keyword[in] ( identifier[args] +( identifier[kwargs] ,)) keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[dct] }):
identifier[setattr] ( identifier[self] , identifier[k] , identifier[v] ) | def _imm_default_init(self, *args, **kwargs):
"""
An immutable's defalt initialization function is to accept any number of dictionaries followed
by any number of keyword args and to turn them all into the parameters of the immutable that is
being created.
"""
for (k, v) in six.iteritems({k: v for dct in args + (kwargs,) for (k, v) in dct}):
setattr(self, k, v) # depends on [control=['for'], data=[]] |
def anchored_pairs(self, anchor):
    """
    Get distances between an anchor term and all other terms.

    Args:
        anchor (str): The anchor term.

    Returns:
        OrderedDict: The distances, in descending order.
    """
    # Score every term against the anchor, keeping only truthy scores.
    scored = ((term, self.get_pair(anchor, term)) for term in self.keys)
    pairs = OrderedDict((term, score) for term, score in scored if score)
    return utils.sort_dict(pairs)
constant[
Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order.
]
variable[pairs] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[term]] in starred[name[self].keys] begin[:]
variable[score] assign[=] call[name[self].get_pair, parameter[name[anchor], name[term]]]
if name[score] begin[:]
call[name[pairs]][name[term]] assign[=] name[score]
return[call[name[utils].sort_dict, parameter[name[pairs]]]] | keyword[def] identifier[anchored_pairs] ( identifier[self] , identifier[anchor] ):
literal[string]
identifier[pairs] = identifier[OrderedDict] ()
keyword[for] identifier[term] keyword[in] identifier[self] . identifier[keys] :
identifier[score] = identifier[self] . identifier[get_pair] ( identifier[anchor] , identifier[term] )
keyword[if] identifier[score] : identifier[pairs] [ identifier[term] ]= identifier[score]
keyword[return] identifier[utils] . identifier[sort_dict] ( identifier[pairs] ) | def anchored_pairs(self, anchor):
"""
Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order.
"""
pairs = OrderedDict()
for term in self.keys:
score = self.get_pair(anchor, term)
if score:
pairs[term] = score # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['term']]
return utils.sort_dict(pairs) |
def render(template, extra=None, **kwargs):
    """Return the template rendered using Python's str.format().

    The formatting context is the hook execution environment, overlaid
    with the optional ``extra`` mapping and then any keyword arguments
    (later sources win on key collisions).

    Args:
        template (str): A ``str.format``-style template.
        extra (dict | None): Optional extra context values.
        **kwargs: Additional context values, highest precedence.

    Returns:
        str: The rendered template.
    """
    context = hookenv.execution_environment()
    # Fix the mutable-default-argument pitfall: the old `extra={}` default
    # shared a single dict across all calls. `None` preserves call-site
    # behaviour while avoiding the shared object.
    context.update(extra or {})
    context.update(kwargs)
    return template.format(**context)
constant[Return the template rendered using Python's str.format().]
variable[context] assign[=] call[name[hookenv].execution_environment, parameter[]]
call[name[context].update, parameter[name[extra]]]
call[name[context].update, parameter[name[kwargs]]]
return[call[name[template].format, parameter[]]] | keyword[def] identifier[render] ( identifier[template] , identifier[extra] ={},** identifier[kwargs] ):
literal[string]
identifier[context] = identifier[hookenv] . identifier[execution_environment] ()
identifier[context] . identifier[update] ( identifier[extra] )
identifier[context] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[template] . identifier[format] (** identifier[context] ) | def render(template, extra={}, **kwargs):
"""Return the template rendered using Python's str.format()."""
context = hookenv.execution_environment()
context.update(extra)
context.update(kwargs)
return template.format(**context) |
def which_roles_can(self, name):
    """Which role can SendMail?

    Looks up the permission record named ``name`` created by this client
    and returns the roles of the groups attached to it.

    Args:
        name (str): Permission name to look up (e.g. ``'SendMail'``).

    Returns:
        list[dict]: One ``{'role': ...}`` entry per group holding the
        permission; an empty list when no matching record exists.
    """
    record = AuthPermission.objects(creator=self.client, name=name).first()
    if record is None:
        # .first() yields None when nothing matches; previously this
        # crashed with AttributeError on `.groups`.
        return []
    return [{'role': group.role} for group in record.groups]
constant[Which role can SendMail? ]
variable[targetPermissionRecords] assign[=] call[call[name[AuthPermission].objects, parameter[]].first, parameter[]]
return[<ast.ListComp object at 0x7da1b0fdd6c0>] | keyword[def] identifier[which_roles_can] ( identifier[self] , identifier[name] ):
literal[string]
identifier[targetPermissionRecords] = identifier[AuthPermission] . identifier[objects] ( identifier[creator] = identifier[self] . identifier[client] , identifier[name] = identifier[name] ). identifier[first] ()
keyword[return] [{ literal[string] : identifier[group] . identifier[role] } keyword[for] identifier[group] keyword[in] identifier[targetPermissionRecords] . identifier[groups] ] | def which_roles_can(self, name):
"""Which role can SendMail? """
targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
return [{'role': group.role} for group in targetPermissionRecords.groups] |
def constraint_from_choices(cls, value_type: type, choices: collections.Sequence):
    """
    Returns a constraint callable based on choices of a given type
    """
    # NOTE(review): `collections.Sequence` is a legacy alias removed in
    # Python 3.10 -- presumably this targets an older runtime; otherwise
    # the annotation should become `collections.abc.Sequence`.
    choices_str = ', '.join(map(str, choices))

    def constraint(value):
        coerced = value_type(value)
        if coerced not in choices:
            raise ParameterError('Argument must be one of %s' % choices_str)
        return coerced

    constraint.__name__ = 'choices_%s' % value_type.__name__
    constraint.__doc__ = 'choice of %s' % choices_str
    return constraint
constant[
Returns a constraint callable based on choices of a given type
]
variable[choices_str] assign[=] call[constant[, ].join, parameter[call[name[map], parameter[name[str], name[choices]]]]]
def function[constraint, parameter[value]]:
variable[value] assign[=] call[name[value_type], parameter[name[value]]]
if compare[name[value] <ast.NotIn object at 0x7da2590d7190> name[choices]] begin[:]
<ast.Raise object at 0x7da20c7c9240>
return[name[value]]
name[constraint].__name__ assign[=] binary_operation[constant[choices_%s] <ast.Mod object at 0x7da2590d6920> name[value_type].__name__]
name[constraint].__doc__ assign[=] binary_operation[constant[choice of %s] <ast.Mod object at 0x7da2590d6920> name[choices_str]]
return[name[constraint]] | keyword[def] identifier[constraint_from_choices] ( identifier[cls] , identifier[value_type] : identifier[type] , identifier[choices] : identifier[collections] . identifier[Sequence] ):
literal[string]
identifier[choices_str] = literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[choices] ))
keyword[def] identifier[constraint] ( identifier[value] ):
identifier[value] = identifier[value_type] ( identifier[value] )
keyword[if] identifier[value] keyword[not] keyword[in] identifier[choices] :
keyword[raise] identifier[ParameterError] ( literal[string] % identifier[choices_str] )
keyword[return] identifier[value]
identifier[constraint] . identifier[__name__] = literal[string] % identifier[value_type] . identifier[__name__]
identifier[constraint] . identifier[__doc__] = literal[string] % identifier[choices_str]
keyword[return] identifier[constraint] | def constraint_from_choices(cls, value_type: type, choices: collections.Sequence):
"""
Returns a constraint callable based on choices of a given type
"""
choices_str = ', '.join(map(str, choices))
def constraint(value):
value = value_type(value)
if value not in choices:
raise ParameterError('Argument must be one of %s' % choices_str) # depends on [control=['if'], data=[]]
return value
constraint.__name__ = 'choices_%s' % value_type.__name__
constraint.__doc__ = 'choice of %s' % choices_str
return constraint |
def initial_sanity_check(self):
    """Checks if we can create the project"""
    # Check for python module collision
    self.try_import(self.project_name)
    # Is the name a valid identifier?
    self.validate_name(self.project_name)
    # Refuse to clobber anything already present on disk.
    collisions = (
        (self.project_name,
         "Directory {} already exist. Aborting.".format(self.project_name)),
        ('manage.py',
         "A manage.py file already exist in the current directory. Aborting."),
    )
    for path, message in collisions:
        if os.path.exists(path):
            print(message)
            return False
    return True
constant[Checks if we can create the project]
call[name[self].try_import, parameter[name[self].project_name]]
call[name[self].validate_name, parameter[name[self].project_name]]
if call[name[os].path.exists, parameter[name[self].project_name]] begin[:]
call[name[print], parameter[call[constant[Directory {} already exist. Aborting.].format, parameter[name[self].project_name]]]]
return[constant[False]]
if call[name[os].path.exists, parameter[constant[manage.py]]] begin[:]
call[name[print], parameter[constant[A manage.py file already exist in the current directory. Aborting.]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[initial_sanity_check] ( identifier[self] ):
literal[string]
identifier[self] . identifier[try_import] ( identifier[self] . identifier[project_name] )
identifier[self] . identifier[validate_name] ( identifier[self] . identifier[project_name] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[project_name] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[project_name] ))
keyword[return] keyword[False]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ):
identifier[print] ( literal[string] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def initial_sanity_check(self):
"""Checks if we can create the project""" # Check for python module collision
self.try_import(self.project_name) # Is the name a valid identifier?
self.validate_name(self.project_name) # Make sure we don't mess with existing directories
if os.path.exists(self.project_name):
print('Directory {} already exist. Aborting.'.format(self.project_name))
return False # depends on [control=['if'], data=[]]
if os.path.exists('manage.py'):
print('A manage.py file already exist in the current directory. Aborting.')
return False # depends on [control=['if'], data=[]]
return True |
def setup_gui(self):
    """Setup the main layout of the widget."""
    scroll_area = self.setup_scrollarea()
    up_button, down_button = self.setup_arrow_buttons()
    self.setFixedWidth(150)
    main_layout = QVBoxLayout(self)
    main_layout.setContentsMargins(0, 0, 0, 0)
    main_layout.setSpacing(0)
    # Stack the widgets top-to-bottom: up arrow, scroll area, down arrow.
    for widget in (up_button, scroll_area, down_button):
        main_layout.addWidget(widget)
constant[Setup the main layout of the widget.]
variable[scrollarea] assign[=] call[name[self].setup_scrollarea, parameter[]]
<ast.Tuple object at 0x7da20e9b3340> assign[=] call[name[self].setup_arrow_buttons, parameter[]]
call[name[self].setFixedWidth, parameter[constant[150]]]
variable[layout] assign[=] call[name[QVBoxLayout], parameter[name[self]]]
call[name[layout].setContentsMargins, parameter[constant[0], constant[0], constant[0], constant[0]]]
call[name[layout].setSpacing, parameter[constant[0]]]
call[name[layout].addWidget, parameter[name[up_btn]]]
call[name[layout].addWidget, parameter[name[scrollarea]]]
call[name[layout].addWidget, parameter[name[down_btn]]] | keyword[def] identifier[setup_gui] ( identifier[self] ):
literal[string]
identifier[scrollarea] = identifier[self] . identifier[setup_scrollarea] ()
identifier[up_btn] , identifier[down_btn] = identifier[self] . identifier[setup_arrow_buttons] ()
identifier[self] . identifier[setFixedWidth] ( literal[int] )
identifier[layout] = identifier[QVBoxLayout] ( identifier[self] )
identifier[layout] . identifier[setContentsMargins] ( literal[int] , literal[int] , literal[int] , literal[int] )
identifier[layout] . identifier[setSpacing] ( literal[int] )
identifier[layout] . identifier[addWidget] ( identifier[up_btn] )
identifier[layout] . identifier[addWidget] ( identifier[scrollarea] )
identifier[layout] . identifier[addWidget] ( identifier[down_btn] ) | def setup_gui(self):
"""Setup the main layout of the widget."""
scrollarea = self.setup_scrollarea()
(up_btn, down_btn) = self.setup_arrow_buttons()
self.setFixedWidth(150)
layout = QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(up_btn)
layout.addWidget(scrollarea)
layout.addWidget(down_btn) |
def resize_pane(pymux, variables):
    """
    Resize/zoom the active pane.
    """
    amounts = {}
    try:
        # Each direction argument defaults to 0 when absent/empty.
        for side in ('left', 'right', 'up', 'down'):
            amounts[side] = int(variables['<%s>' % side] or 0)
    except ValueError:
        raise CommandException('Expecting an integer.')

    window = pymux.arrangement.get_active_window()
    if window:
        window.change_size_for_active_pane(**amounts)

        # Toggle zoom when -Z was given.
        if variables['-Z']:
            window.zoom = not window.zoom
constant[
Resize/zoom the active pane.
]
<ast.Try object at 0x7da20c6ab970>
variable[w] assign[=] call[name[pymux].arrangement.get_active_window, parameter[]]
if name[w] begin[:]
call[name[w].change_size_for_active_pane, parameter[]]
if call[name[variables]][constant[-Z]] begin[:]
name[w].zoom assign[=] <ast.UnaryOp object at 0x7da18bc70d30> | keyword[def] identifier[resize_pane] ( identifier[pymux] , identifier[variables] ):
literal[string]
keyword[try] :
identifier[left] = identifier[int] ( identifier[variables] [ literal[string] ] keyword[or] literal[int] )
identifier[right] = identifier[int] ( identifier[variables] [ literal[string] ] keyword[or] literal[int] )
identifier[up] = identifier[int] ( identifier[variables] [ literal[string] ] keyword[or] literal[int] )
identifier[down] = identifier[int] ( identifier[variables] [ literal[string] ] keyword[or] literal[int] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[CommandException] ( literal[string] )
identifier[w] = identifier[pymux] . identifier[arrangement] . identifier[get_active_window] ()
keyword[if] identifier[w] :
identifier[w] . identifier[change_size_for_active_pane] ( identifier[up] = identifier[up] , identifier[right] = identifier[right] , identifier[down] = identifier[down] , identifier[left] = identifier[left] )
keyword[if] identifier[variables] [ literal[string] ]:
identifier[w] . identifier[zoom] = keyword[not] identifier[w] . identifier[zoom] | def resize_pane(pymux, variables):
"""
Resize/zoom the active pane.
"""
try:
left = int(variables['<left>'] or 0)
right = int(variables['<right>'] or 0)
up = int(variables['<up>'] or 0)
down = int(variables['<down>'] or 0) # depends on [control=['try'], data=[]]
except ValueError:
raise CommandException('Expecting an integer.') # depends on [control=['except'], data=[]]
w = pymux.arrangement.get_active_window()
if w:
w.change_size_for_active_pane(up=up, right=right, down=down, left=left)
# Zoom in/out.
if variables['-Z']:
w.zoom = not w.zoom # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def _validate_entities(self, stages):
    """
    Normalize and validate *stages*.

    Accepts either a single Stage or a list of Stage objects; a single
    Stage is wrapped in a list so callers always get a list back.

    :param stages: Stage object or list of Stage objects
    :return: list of Stage objects
    :raises TypeError: if *stages* is empty/None or any element is not a Stage
    """
    # NOTE(review): TypeError is raised with expected_type/actual_type
    # keyword arguments, which the builtin TypeError does not accept --
    # presumably a project-specific TypeError imported at module level;
    # confirm the import.
    if not stages:
        raise TypeError(expected_type=Stage, actual_type=type(stages))
    if not isinstance(stages, list):
        stages = [stages]
    for value in stages:
        if not isinstance(value, Stage):
            raise TypeError(expected_type=Stage, actual_type=type(value))
    return stages
constant[
Purpose: Validate whether the argument 'stages' is of list of Stage objects
:argument: list of Stage objects
]
if <ast.UnaryOp object at 0x7da1b0fd13c0> begin[:]
<ast.Raise object at 0x7da1b0fd08b0>
if <ast.UnaryOp object at 0x7da1b0ff92d0> begin[:]
variable[stages] assign[=] list[[<ast.Name object at 0x7da1b0ff8460>]]
for taget[name[value]] in starred[name[stages]] begin[:]
if <ast.UnaryOp object at 0x7da1b0ff8790> begin[:]
<ast.Raise object at 0x7da1b0ff9060>
return[name[stages]] | keyword[def] identifier[_validate_entities] ( identifier[self] , identifier[stages] ):
literal[string]
keyword[if] keyword[not] identifier[stages] :
keyword[raise] identifier[TypeError] ( identifier[expected_type] = identifier[Stage] , identifier[actual_type] = identifier[type] ( identifier[stages] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[stages] , identifier[list] ):
identifier[stages] =[ identifier[stages] ]
keyword[for] identifier[value] keyword[in] identifier[stages] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[Stage] ):
keyword[raise] identifier[TypeError] ( identifier[expected_type] = identifier[Stage] , identifier[actual_type] = identifier[type] ( identifier[value] ))
keyword[return] identifier[stages] | def _validate_entities(self, stages):
"""
Purpose: Validate whether the argument 'stages' is of list of Stage objects
:argument: list of Stage objects
"""
if not stages:
raise TypeError(expected_type=Stage, actual_type=type(stages)) # depends on [control=['if'], data=[]]
if not isinstance(stages, list):
stages = [stages] # depends on [control=['if'], data=[]]
for value in stages:
if not isinstance(value, Stage):
raise TypeError(expected_type=Stage, actual_type=type(value)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['value']]
return stages |
def close(self):
    """Close all communication process streams."""
    # Release both console pipe handles: output pipe first, then input.
    for handle in (self.conout_pipe, self.conin_pipe):
        windll.kernel32.CloseHandle(handle)
constant[Close all communication process streams.]
call[name[windll].kernel32.CloseHandle, parameter[name[self].conout_pipe]]
call[name[windll].kernel32.CloseHandle, parameter[name[self].conin_pipe]] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[windll] . identifier[kernel32] . identifier[CloseHandle] ( identifier[self] . identifier[conout_pipe] )
identifier[windll] . identifier[kernel32] . identifier[CloseHandle] ( identifier[self] . identifier[conin_pipe] ) | def close(self):
"""Close all communication process streams."""
windll.kernel32.CloseHandle(self.conout_pipe)
windll.kernel32.CloseHandle(self.conin_pipe) |
def pack(self, out: IO):
    """
    Write the AttributeTable to the file-like object `out`.

    .. note::
        Advanced usage only. You will typically never need to call this
        method as it will be called for you when saving a ClassFile.

    :param out: Any file-like object providing `write()`
    """
    # Attribute count as a big-endian unsigned short. Note: `pack` here
    # resolves to the module-level struct.pack -- the method name lives in
    # the class namespace and does not shadow it inside this body.
    out.write(pack('>H', len(self._table)))
    for attribute in self:
        info = attribute.pack()
        # Per-attribute header: name index (u2) then payload length in
        # bytes (u4), followed by the payload itself.
        out.write(pack(
            '>HI',
            attribute.name.index,
            len(info)
        ))
        out.write(info)
constant[
Write the AttributeTable to the file-like object `out`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when saving a ClassFile.
:param out: Any file-like object providing `write()`
]
call[name[out].write, parameter[call[name[pack], parameter[constant[>H], call[name[len], parameter[name[self]._table]]]]]]
for taget[name[attribute]] in starred[name[self]] begin[:]
variable[info] assign[=] call[name[attribute].pack, parameter[]]
call[name[out].write, parameter[call[name[pack], parameter[constant[>HI], name[attribute].name.index, call[name[len], parameter[name[info]]]]]]]
call[name[out].write, parameter[name[info]]] | keyword[def] identifier[pack] ( identifier[self] , identifier[out] : identifier[IO] ):
literal[string]
identifier[out] . identifier[write] ( identifier[pack] ( literal[string] , identifier[len] ( identifier[self] . identifier[_table] )))
keyword[for] identifier[attribute] keyword[in] identifier[self] :
identifier[info] = identifier[attribute] . identifier[pack] ()
identifier[out] . identifier[write] ( identifier[pack] (
literal[string] ,
identifier[attribute] . identifier[name] . identifier[index] ,
identifier[len] ( identifier[info] )
))
identifier[out] . identifier[write] ( identifier[info] ) | def pack(self, out: IO):
"""
Write the AttributeTable to the file-like object `out`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when saving a ClassFile.
:param out: Any file-like object providing `write()`
"""
out.write(pack('>H', len(self._table)))
for attribute in self:
info = attribute.pack()
out.write(pack('>HI', attribute.name.index, len(info)))
out.write(info) # depends on [control=['for'], data=['attribute']] |
def colorRGB(r, g, b):
    """
    Convert 8-bit RGB components to a normalized float color tuple.

    :param r: red component in the range [0..255]
    :param g: green component in the range [0..255]
    :param b: blue component in the range [0..255]
    :return: an (r, g, b) tuple of floats in the range [0..1]
    """
    # Divide by the float literal 255.0: this guarantees true division
    # even under Python 2 semantics (where float(r / 255) would truncate
    # first and always yield 0.0 or 1.0) and drops the redundant float()
    # wrappers -- division by a float already produces a float.
    return (r / 255.0, g / 255.0, b / 255.0)
constant[
Given the R,G,B int values for the RGB color mode in the range [0..255],
return a RGB color tuple with float values in the range [0..1].
]
return[tuple[[<ast.Call object at 0x7da1b2381b70>, <ast.Call object at 0x7da1b2380070>, <ast.Call object at 0x7da1b23839a0>]]] | keyword[def] identifier[colorRGB] ( identifier[r] , identifier[g] , identifier[b] ):
literal[string]
keyword[return] ( identifier[float] ( identifier[r] / literal[int] ), identifier[float] ( identifier[g] / literal[int] ), identifier[float] ( identifier[b] / literal[int] )) | def colorRGB(r, g, b):
"""
Given the R,G,B int values for the RGB color mode in the range [0..255],
return a RGB color tuple with float values in the range [0..1].
"""
return (float(r / 255), float(g / 255), float(b / 255)) |
def read_struct(self, struct_class):
    """Parse and return a structure from the current buffer offset."""
    buf = bytearray_to_buff(self._data)
    result = struct_class.unpack_from(buf, self._offset)
    # Advance the read cursor past the bytes just consumed.
    self.skip(struct_class.size)
    return result
constant[Parse and return a structure from the current buffer offset.]
variable[struct] assign[=] call[name[struct_class].unpack_from, parameter[call[name[bytearray_to_buff], parameter[name[self]._data]], name[self]._offset]]
call[name[self].skip, parameter[name[struct_class].size]]
return[name[struct]] | keyword[def] identifier[read_struct] ( identifier[self] , identifier[struct_class] ):
literal[string]
identifier[struct] = identifier[struct_class] . identifier[unpack_from] ( identifier[bytearray_to_buff] ( identifier[self] . identifier[_data] ), identifier[self] . identifier[_offset] )
identifier[self] . identifier[skip] ( identifier[struct_class] . identifier[size] )
keyword[return] identifier[struct] | def read_struct(self, struct_class):
"""Parse and return a structure from the current buffer offset."""
struct = struct_class.unpack_from(bytearray_to_buff(self._data), self._offset)
self.skip(struct_class.size)
return struct |
def get_full_history(self, force=None, last_update=None, flush=False):
    '''
    Run the object import with full history enabled.

    Fields change depending on when you run activity_import,
    such as "last_updated" type fields which don't have activity
    being tracked, which means we'll always end up with different
    hash values, so we need to always remove all existing object
    states and import fresh
    '''
    kwargs = dict(force=force, last_update=last_update, flush=flush,
                  full_history=True)
    return self._run_object_import(**kwargs)
constant[
Fields change depending on when you run activity_import,
such as "last_updated" type fields which don't have activity
being tracked, which means we'll always end up with different
hash values, so we need to always remove all existing object
states and import fresh
]
return[call[name[self]._run_object_import, parameter[]]] | keyword[def] identifier[get_full_history] ( identifier[self] , identifier[force] = keyword[None] , identifier[last_update] = keyword[None] , identifier[flush] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[_run_object_import] ( identifier[force] = identifier[force] , identifier[last_update] = identifier[last_update] ,
identifier[flush] = identifier[flush] , identifier[full_history] = keyword[True] ) | def get_full_history(self, force=None, last_update=None, flush=False):
"""
Fields change depending on when you run activity_import,
such as "last_updated" type fields which don't have activity
being tracked, which means we'll always end up with different
hash values, so we need to always remove all existing object
states and import fresh
"""
return self._run_object_import(force=force, last_update=last_update, flush=flush, full_history=True) |
def dump_state(self):
    """Dump the current state of this emulated object as a dictionary.
    Note that dump_state happens synchronously in the emulation thread to
    avoid any race conditions with accessing data members and ensure a
    consistent view of all state data.
    Returns:
        dict: The current state of the object that could be passed to load_state.
    """

    # Build the state dict inside the emulation thread for a consistent
    # snapshot of all members.
    def _background_dump():
        # Start from the base-class state and layer on device-level fields.
        state = super(ReferenceDevice, self).dump_state()
        state['state_name'] = self.STATE_NAME
        state['state_version'] = self.STATE_VERSION
        state['reset_count'] = self.reset_count
        # The script is raw bytes; base64-encode so the dict stays
        # text-serializable (e.g. JSON-safe).
        state['received_script'] = base64.b64encode(self.script).decode('utf-8')
        return state

    return self.synchronize_task(_background_dump)
constant[Dump the current state of this emulated object as a dictionary.
Note that dump_state happens synchronously in the emulation thread to
avoid any race conditions with accessing data members and ensure a
consistent view of all state data.
Returns:
dict: The current state of the object that could be passed to load_state.
]
def function[_background_dump, parameter[]]:
variable[state] assign[=] call[call[name[super], parameter[name[ReferenceDevice], name[self]]].dump_state, parameter[]]
call[name[state]][constant[state_name]] assign[=] name[self].STATE_NAME
call[name[state]][constant[state_version]] assign[=] name[self].STATE_VERSION
call[name[state]][constant[reset_count]] assign[=] name[self].reset_count
call[name[state]][constant[received_script]] assign[=] call[call[name[base64].b64encode, parameter[name[self].script]].decode, parameter[constant[utf-8]]]
return[name[state]]
return[call[name[self].synchronize_task, parameter[name[_background_dump]]]] | keyword[def] identifier[dump_state] ( identifier[self] ):
literal[string]
keyword[def] identifier[_background_dump] ():
identifier[state] = identifier[super] ( identifier[ReferenceDevice] , identifier[self] ). identifier[dump_state] ()
identifier[state] [ literal[string] ]= identifier[self] . identifier[STATE_NAME]
identifier[state] [ literal[string] ]= identifier[self] . identifier[STATE_VERSION]
identifier[state] [ literal[string] ]= identifier[self] . identifier[reset_count]
identifier[state] [ literal[string] ]= identifier[base64] . identifier[b64encode] ( identifier[self] . identifier[script] ). identifier[decode] ( literal[string] )
keyword[return] identifier[state]
keyword[return] identifier[self] . identifier[synchronize_task] ( identifier[_background_dump] ) | def dump_state(self):
"""Dump the current state of this emulated object as a dictionary.
Note that dump_state happens synchronously in the emulation thread to
avoid any race conditions with accessing data members and ensure a
consistent view of all state data.
Returns:
dict: The current state of the object that could be passed to load_state.
"""
# Dump the state of all of the tiles
def _background_dump():
state = super(ReferenceDevice, self).dump_state()
state['state_name'] = self.STATE_NAME
state['state_version'] = self.STATE_VERSION
state['reset_count'] = self.reset_count
state['received_script'] = base64.b64encode(self.script).decode('utf-8')
return state
return self.synchronize_task(_background_dump) |
def _tracker_str(item):
    """Return a serializable representation of the tracker for *item*.

    String and tuple trackers are passed through unchanged, any other
    tracker object is reduced to its ``uuid``, and items with no tracker
    (simple built-ins such as ``int`` or ``float``) are returned as-is.
    """
    instance = tracker(item)
    if instance is None:
        # No tracker: a simple built-in value; return it unconverted
        # rather than stringifying it.
        return item
    if isinstance(instance, (str, tuple)):
        return instance
    return instance.uuid
constant[Returns a string representation of the tracker object for the given item.
Args:
item: object to get tracker for.
fqdn (str): fully-qualified domain name of the object.
]
variable[instance] assign[=] call[name[tracker], parameter[name[item]]]
if compare[name[instance] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[instance], name[str]]] begin[:]
return[name[instance]] | keyword[def] identifier[_tracker_str] ( identifier[item] ):
literal[string]
identifier[instance] = identifier[tracker] ( identifier[item] )
keyword[if] identifier[instance] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[instance] , identifier[str] ):
keyword[return] identifier[instance]
keyword[elif] identifier[isinstance] ( identifier[instance] , identifier[tuple] ):
keyword[return] identifier[instance]
keyword[else] :
keyword[return] identifier[instance] . identifier[uuid]
keyword[else] :
keyword[return] identifier[item] | def _tracker_str(item):
"""Returns a string representation of the tracker object for the given item.
Args:
item: object to get tracker for.
fqdn (str): fully-qualified domain name of the object.
"""
instance = tracker(item)
if instance is not None:
if isinstance(instance, str):
return instance # depends on [control=['if'], data=[]]
elif isinstance(instance, tuple):
return instance # depends on [control=['if'], data=[]]
else:
return instance.uuid # depends on [control=['if'], data=['instance']]
else:
#Must be a simple built-in type like `int` or `float`, in which case we
#don't want to convert it to a string.
return item |
def return_hdr(self):
    """Return the header for further use.
    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header
    """
    subj_id = ''  # no subject information in the header
    hdr = _parse_ini(self.filename)
    # Companion data (.eeg) and marker (.vmrk) files are referenced by
    # name inside the .vhdr header, relative to the header's directory.
    self.eeg_file = self.filename.parent / hdr['Common Infos']['DataFile']
    self.vmrk_file = self.filename.parent / hdr['Common Infos']['MarkerFile']
    self.mrk = _parse_ini(self.vmrk_file)
    start_time = _read_datetime(self.mrk)
    # SamplingInterval is the per-sample period, presumably microseconds
    # given the 1e6 factor -- confirm against the BrainVision spec.
    self.s_freq = 1e6 / float(hdr['Common Infos']['SamplingInterval'])
    # Each 'Channel Infos' value is a sequence: v[0] is the channel
    # label, v[2] is used as the per-channel gain factor.
    chan_name = [v[0] for v in hdr['Channel Infos'].values()]
    self.gain = array([float(v[2]) for v in hdr['Channel Infos'].values()])
    # number of samples: total data-file size divided by bytes-per-sample
    # and channel count.
    self.data_type = BV_DATATYPE[hdr['Binary Infos']['BinaryFormat']]
    N_BYTES = dtype(self.data_type).itemsize
    n_samples = int(self.eeg_file.stat().st_size / N_BYTES / len(chan_name))
    self.dshape = len(chan_name), int(n_samples)
    self.data_order = BV_ORIENTATION[hdr['Common Infos']['DataOrientation']]
    # Keep the parsed ini contents so callers can inspect raw fields.
    orig = {
        'vhdr': hdr,
        'vmrk': self.mrk,
    }
    return subj_id, start_time, self.s_freq, chan_name, n_samples, orig
constant[Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
]
variable[subj_id] assign[=] constant[]
variable[hdr] assign[=] call[name[_parse_ini], parameter[name[self].filename]]
name[self].eeg_file assign[=] binary_operation[name[self].filename.parent / call[call[name[hdr]][constant[Common Infos]]][constant[DataFile]]]
name[self].vmrk_file assign[=] binary_operation[name[self].filename.parent / call[call[name[hdr]][constant[Common Infos]]][constant[MarkerFile]]]
name[self].mrk assign[=] call[name[_parse_ini], parameter[name[self].vmrk_file]]
variable[start_time] assign[=] call[name[_read_datetime], parameter[name[self].mrk]]
name[self].s_freq assign[=] binary_operation[constant[1000000.0] / call[name[float], parameter[call[call[name[hdr]][constant[Common Infos]]][constant[SamplingInterval]]]]]
variable[chan_name] assign[=] <ast.ListComp object at 0x7da1b0d756c0>
name[self].gain assign[=] call[name[array], parameter[<ast.ListComp object at 0x7da1b0d77c10>]]
name[self].data_type assign[=] call[name[BV_DATATYPE]][call[call[name[hdr]][constant[Binary Infos]]][constant[BinaryFormat]]]
variable[N_BYTES] assign[=] call[name[dtype], parameter[name[self].data_type]].itemsize
variable[n_samples] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[self].eeg_file.stat, parameter[]].st_size / name[N_BYTES]] / call[name[len], parameter[name[chan_name]]]]]]
name[self].dshape assign[=] tuple[[<ast.Call object at 0x7da1b0d75e10>, <ast.Call object at 0x7da1b0d75630>]]
name[self].data_order assign[=] call[name[BV_ORIENTATION]][call[call[name[hdr]][constant[Common Infos]]][constant[DataOrientation]]]
variable[orig] assign[=] dictionary[[<ast.Constant object at 0x7da1b0d76bc0>, <ast.Constant object at 0x7da1b0d77ca0>], [<ast.Name object at 0x7da1b0d76410>, <ast.Attribute object at 0x7da1b0d75ed0>]]
return[tuple[[<ast.Name object at 0x7da1b0d763e0>, <ast.Name object at 0x7da1b0d76500>, <ast.Attribute object at 0x7da1b0d76140>, <ast.Name object at 0x7da1b0d77280>, <ast.Name object at 0x7da1b0d76200>, <ast.Name object at 0x7da1b0d74d90>]]] | keyword[def] identifier[return_hdr] ( identifier[self] ):
literal[string]
identifier[subj_id] = literal[string]
identifier[hdr] = identifier[_parse_ini] ( identifier[self] . identifier[filename] )
identifier[self] . identifier[eeg_file] = identifier[self] . identifier[filename] . identifier[parent] / identifier[hdr] [ literal[string] ][ literal[string] ]
identifier[self] . identifier[vmrk_file] = identifier[self] . identifier[filename] . identifier[parent] / identifier[hdr] [ literal[string] ][ literal[string] ]
identifier[self] . identifier[mrk] = identifier[_parse_ini] ( identifier[self] . identifier[vmrk_file] )
identifier[start_time] = identifier[_read_datetime] ( identifier[self] . identifier[mrk] )
identifier[self] . identifier[s_freq] = literal[int] / identifier[float] ( identifier[hdr] [ literal[string] ][ literal[string] ])
identifier[chan_name] =[ identifier[v] [ literal[int] ] keyword[for] identifier[v] keyword[in] identifier[hdr] [ literal[string] ]. identifier[values] ()]
identifier[self] . identifier[gain] = identifier[array] ([ identifier[float] ( identifier[v] [ literal[int] ]) keyword[for] identifier[v] keyword[in] identifier[hdr] [ literal[string] ]. identifier[values] ()])
identifier[self] . identifier[data_type] = identifier[BV_DATATYPE] [ identifier[hdr] [ literal[string] ][ literal[string] ]]
identifier[N_BYTES] = identifier[dtype] ( identifier[self] . identifier[data_type] ). identifier[itemsize]
identifier[n_samples] = identifier[int] ( identifier[self] . identifier[eeg_file] . identifier[stat] (). identifier[st_size] / identifier[N_BYTES] / identifier[len] ( identifier[chan_name] ))
identifier[self] . identifier[dshape] = identifier[len] ( identifier[chan_name] ), identifier[int] ( identifier[n_samples] )
identifier[self] . identifier[data_order] = identifier[BV_ORIENTATION] [ identifier[hdr] [ literal[string] ][ literal[string] ]]
identifier[orig] ={
literal[string] : identifier[hdr] ,
literal[string] : identifier[self] . identifier[mrk] ,
}
keyword[return] identifier[subj_id] , identifier[start_time] , identifier[self] . identifier[s_freq] , identifier[chan_name] , identifier[n_samples] , identifier[orig] | def return_hdr(self):
"""Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
"""
subj_id = '' # no subject information in the header
hdr = _parse_ini(self.filename)
self.eeg_file = self.filename.parent / hdr['Common Infos']['DataFile']
self.vmrk_file = self.filename.parent / hdr['Common Infos']['MarkerFile']
self.mrk = _parse_ini(self.vmrk_file)
start_time = _read_datetime(self.mrk)
self.s_freq = 1000000.0 / float(hdr['Common Infos']['SamplingInterval'])
chan_name = [v[0] for v in hdr['Channel Infos'].values()]
self.gain = array([float(v[2]) for v in hdr['Channel Infos'].values()])
# number of samples
self.data_type = BV_DATATYPE[hdr['Binary Infos']['BinaryFormat']]
N_BYTES = dtype(self.data_type).itemsize
n_samples = int(self.eeg_file.stat().st_size / N_BYTES / len(chan_name))
self.dshape = (len(chan_name), int(n_samples))
self.data_order = BV_ORIENTATION[hdr['Common Infos']['DataOrientation']]
orig = {'vhdr': hdr, 'vmrk': self.mrk}
return (subj_id, start_time, self.s_freq, chan_name, n_samples, orig) |
def getEdges(self, fromVol):
    """ Return the edges available from fromVol.

    Yields Store.Diff objects describing which volumes are reachable
    from *fromVol* together with a (possibly estimated) transfer size.
    """
    if fromVol is None:
        # No base volume: every known path is reachable only as a full
        # transfer of its own size.
        for toVol in self.paths:
            yield Store.Diff(self, toVol, fromVol, toVol.size)
        return
    if fromVol not in self.paths:
        return
    fromBVol = self.butterVolumes[fromVol.uuid]
    parentUUID = fromBVol.parent_uuid
    butterDir = os.path.dirname(fromBVol.fullPath)
    # Candidate targets: volumes that share fromVol's parent snapshot or
    # live in the same directory (likely related snapshots).
    vols = [vol for vol in self.butterVolumes.values()
            if vol.parent_uuid == parentUUID or
            os.path.dirname(vol.fullPath) == butterDir
            ]
    changeRate = self._calcChangeRate(vols)
    for toBVol in vols:
        if toBVol == fromBVol:
            continue
        # This gives a conservative estimate of the size of the diff
        estimatedSize = self._estimateSize(toBVol, fromBVol, changeRate)
        toVol = self._btrfsVol2StoreVol(toBVol)
        yield Store.Diff(self, toVol, fromVol, estimatedSize, sizeIsEstimated=True)
constant[ Return the edges available from fromVol. ]
if compare[name[fromVol] is constant[None]] begin[:]
for taget[name[toVol]] in starred[name[self].paths] begin[:]
<ast.Yield object at 0x7da1b2845db0>
return[None]
if compare[name[fromVol] <ast.NotIn object at 0x7da2590d7190> name[self].paths] begin[:]
return[None]
variable[fromBVol] assign[=] call[name[self].butterVolumes][name[fromVol].uuid]
variable[parentUUID] assign[=] name[fromBVol].parent_uuid
variable[butterDir] assign[=] call[name[os].path.dirname, parameter[name[fromBVol].fullPath]]
variable[vols] assign[=] <ast.ListComp object at 0x7da18fe93c10>
variable[changeRate] assign[=] call[name[self]._calcChangeRate, parameter[name[vols]]]
for taget[name[toBVol]] in starred[name[vols]] begin[:]
if compare[name[toBVol] equal[==] name[fromBVol]] begin[:]
continue
variable[estimatedSize] assign[=] call[name[self]._estimateSize, parameter[name[toBVol], name[fromBVol], name[changeRate]]]
variable[toVol] assign[=] call[name[self]._btrfsVol2StoreVol, parameter[name[toBVol]]]
<ast.Yield object at 0x7da1b27ee380> | keyword[def] identifier[getEdges] ( identifier[self] , identifier[fromVol] ):
literal[string]
keyword[if] identifier[fromVol] keyword[is] keyword[None] :
keyword[for] identifier[toVol] keyword[in] identifier[self] . identifier[paths] :
keyword[yield] identifier[Store] . identifier[Diff] ( identifier[self] , identifier[toVol] , identifier[fromVol] , identifier[toVol] . identifier[size] )
keyword[return]
keyword[if] identifier[fromVol] keyword[not] keyword[in] identifier[self] . identifier[paths] :
keyword[return]
identifier[fromBVol] = identifier[self] . identifier[butterVolumes] [ identifier[fromVol] . identifier[uuid] ]
identifier[parentUUID] = identifier[fromBVol] . identifier[parent_uuid]
identifier[butterDir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[fromBVol] . identifier[fullPath] )
identifier[vols] =[ identifier[vol] keyword[for] identifier[vol] keyword[in] identifier[self] . identifier[butterVolumes] . identifier[values] ()
keyword[if] identifier[vol] . identifier[parent_uuid] == identifier[parentUUID] keyword[or]
identifier[os] . identifier[path] . identifier[dirname] ( identifier[vol] . identifier[fullPath] )== identifier[butterDir]
]
identifier[changeRate] = identifier[self] . identifier[_calcChangeRate] ( identifier[vols] )
keyword[for] identifier[toBVol] keyword[in] identifier[vols] :
keyword[if] identifier[toBVol] == identifier[fromBVol] :
keyword[continue]
identifier[estimatedSize] = identifier[self] . identifier[_estimateSize] ( identifier[toBVol] , identifier[fromBVol] , identifier[changeRate] )
identifier[toVol] = identifier[self] . identifier[_btrfsVol2StoreVol] ( identifier[toBVol] )
keyword[yield] identifier[Store] . identifier[Diff] ( identifier[self] , identifier[toVol] , identifier[fromVol] , identifier[estimatedSize] , identifier[sizeIsEstimated] = keyword[True] ) | def getEdges(self, fromVol):
""" Return the edges available from fromVol. """
if fromVol is None:
for toVol in self.paths:
yield Store.Diff(self, toVol, fromVol, toVol.size) # depends on [control=['for'], data=['toVol']]
return # depends on [control=['if'], data=['fromVol']]
if fromVol not in self.paths:
return # depends on [control=['if'], data=[]]
fromBVol = self.butterVolumes[fromVol.uuid]
parentUUID = fromBVol.parent_uuid
butterDir = os.path.dirname(fromBVol.fullPath)
vols = [vol for vol in self.butterVolumes.values() if vol.parent_uuid == parentUUID or os.path.dirname(vol.fullPath) == butterDir]
changeRate = self._calcChangeRate(vols)
for toBVol in vols:
if toBVol == fromBVol:
continue # depends on [control=['if'], data=[]]
# This gives a conservative estimate of the size of the diff
estimatedSize = self._estimateSize(toBVol, fromBVol, changeRate)
toVol = self._btrfsVol2StoreVol(toBVol)
yield Store.Diff(self, toVol, fromVol, estimatedSize, sizeIsEstimated=True) # depends on [control=['for'], data=['toBVol']] |
def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
                       showReset=False, resetShading=0.25):
    """
    Returns plot of the cell activity. Note that if many timesteps of
    activities are input, matplotlib's image interpolation may omit activities
    (columns in the image).
    @param cellTrace     (list)   a temporally ordered list of sets of cell
                                  activities
    @param cellCount     (int)    number of cells in the space being rendered
    @param activityType  (string) type of cell activity being displayed
    @param title         (string) an optional title for the figure
    @param showReset     (bool)   if true, the first set of cell activities
                                  after a reset will have a grayscale background
    @param resetShading  (float)  applicable if showReset is true, specifies the
                                  intensity of the reset background with 0.0
                                  being white and 1.0 being black
    @return (Plot) plot
    """
    plot = Plot(self, title)
    resetTrace = self.mmGetTraceResets().data
    # Start with a single all-zero column; one column is appended per
    # timestep below.
    data = numpy.zeros((cellCount, 1))
    # NOTE: xrange indicates this module targets Python 2.
    for i in xrange(len(cellTrace)):
        # Set up a "background" vector that is shaded or blank
        if showReset and resetTrace[i]:
            activity = numpy.ones((cellCount, 1)) * resetShading
        else:
            activity = numpy.zeros((cellCount, 1))
        # Mark the active cells for this timestep on top of the background.
        activeIndices = cellTrace[i]
        activity[list(activeIndices)] = 1
        # Appending column-by-column re-copies the array each step
        # (quadratic over the trace length); acceptable for typical sizes.
        data = numpy.concatenate((data, activity), 1)
    plot.add2DArray(data, xlabel="Time", ylabel=activityType, name=title)
    return plot
constant[
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
]
variable[plot] assign[=] call[name[Plot], parameter[name[self], name[title]]]
variable[resetTrace] assign[=] call[name[self].mmGetTraceResets, parameter[]].data
variable[data] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da20c6ab250>, <ast.Constant object at 0x7da20c6ab070>]]]]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[cellTrace]]]]]] begin[:]
if <ast.BoolOp object at 0x7da20c6aa1a0> begin[:]
variable[activity] assign[=] binary_operation[call[name[numpy].ones, parameter[tuple[[<ast.Name object at 0x7da20c6a9840>, <ast.Constant object at 0x7da20c6aa980>]]]] * name[resetShading]]
variable[activeIndices] assign[=] call[name[cellTrace]][name[i]]
call[name[activity]][call[name[list], parameter[name[activeIndices]]]] assign[=] constant[1]
variable[data] assign[=] call[name[numpy].concatenate, parameter[tuple[[<ast.Name object at 0x7da20c6ab2e0>, <ast.Name object at 0x7da20c6abc70>]], constant[1]]]
call[name[plot].add2DArray, parameter[name[data]]]
return[name[plot]] | keyword[def] identifier[mmGetCellTracePlot] ( identifier[self] , identifier[cellTrace] , identifier[cellCount] , identifier[activityType] , identifier[title] = literal[string] ,
identifier[showReset] = keyword[False] , identifier[resetShading] = literal[int] ):
literal[string]
identifier[plot] = identifier[Plot] ( identifier[self] , identifier[title] )
identifier[resetTrace] = identifier[self] . identifier[mmGetTraceResets] (). identifier[data]
identifier[data] = identifier[numpy] . identifier[zeros] (( identifier[cellCount] , literal[int] ))
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[cellTrace] )):
keyword[if] identifier[showReset] keyword[and] identifier[resetTrace] [ identifier[i] ]:
identifier[activity] = identifier[numpy] . identifier[ones] (( identifier[cellCount] , literal[int] ))* identifier[resetShading]
keyword[else] :
identifier[activity] = identifier[numpy] . identifier[zeros] (( identifier[cellCount] , literal[int] ))
identifier[activeIndices] = identifier[cellTrace] [ identifier[i] ]
identifier[activity] [ identifier[list] ( identifier[activeIndices] )]= literal[int]
identifier[data] = identifier[numpy] . identifier[concatenate] (( identifier[data] , identifier[activity] ), literal[int] )
identifier[plot] . identifier[add2DArray] ( identifier[data] , identifier[xlabel] = literal[string] , identifier[ylabel] = identifier[activityType] , identifier[name] = identifier[title] )
keyword[return] identifier[plot] | def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title='', showReset=False, resetShading=0.25):
"""
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
"""
plot = Plot(self, title)
resetTrace = self.mmGetTraceResets().data
data = numpy.zeros((cellCount, 1))
for i in xrange(len(cellTrace)):
# Set up a "background" vector that is shaded or blank
if showReset and resetTrace[i]:
activity = numpy.ones((cellCount, 1)) * resetShading # depends on [control=['if'], data=[]]
else:
activity = numpy.zeros((cellCount, 1))
activeIndices = cellTrace[i]
activity[list(activeIndices)] = 1
data = numpy.concatenate((data, activity), 1) # depends on [control=['for'], data=['i']]
plot.add2DArray(data, xlabel='Time', ylabel=activityType, name=title)
return plot |
def _generate_publish_headers(self):
"""
generate the headers for the connection to event hub service based on the provided config
:return: {} headers
"""
headers = {
'predix-zone-id': self.eventhub_client.zone_id
}
token = self.eventhub_client.service._get_bearer_token()
if self.config.is_grpc():
headers['authorization'] = token[(token.index(' ') + 1):]
else:
headers['authorization'] = token
if self.config.topic == '':
headers['topic'] = self.eventhub_client.zone_id + '_topic'
else:
headers['topic'] = self.config.topic
if self.config.publish_type == self.config.Type.SYNC:
headers['sync-acks'] = 'true'
else:
headers['sync-acks'] = 'false'
headers['send-acks-interval'] = str(self.config.async_cache_ack_interval_millis)
headers['acks'] = str(self.config.async_enable_acks).lower()
headers['nacks'] = str(self.config.async_enable_nacks_only).lower()
headers['cache-acks'] = str(self.config.async_cache_acks_and_nacks).lower()
return headers | def function[_generate_publish_headers, parameter[self]]:
constant[
generate the headers for the connection to event hub service based on the provided config
:return: {} headers
]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da20e9633a0>], [<ast.Attribute object at 0x7da20e963af0>]]
variable[token] assign[=] call[name[self].eventhub_client.service._get_bearer_token, parameter[]]
if call[name[self].config.is_grpc, parameter[]] begin[:]
call[name[headers]][constant[authorization]] assign[=] call[name[token]][<ast.Slice object at 0x7da20e9621d0>]
if compare[name[self].config.topic equal[==] constant[]] begin[:]
call[name[headers]][constant[topic]] assign[=] binary_operation[name[self].eventhub_client.zone_id + constant[_topic]]
if compare[name[self].config.publish_type equal[==] name[self].config.Type.SYNC] begin[:]
call[name[headers]][constant[sync-acks]] assign[=] constant[true]
return[name[headers]] | keyword[def] identifier[_generate_publish_headers] ( identifier[self] ):
literal[string]
identifier[headers] ={
literal[string] : identifier[self] . identifier[eventhub_client] . identifier[zone_id]
}
identifier[token] = identifier[self] . identifier[eventhub_client] . identifier[service] . identifier[_get_bearer_token] ()
keyword[if] identifier[self] . identifier[config] . identifier[is_grpc] ():
identifier[headers] [ literal[string] ]= identifier[token] [( identifier[token] . identifier[index] ( literal[string] )+ literal[int] ):]
keyword[else] :
identifier[headers] [ literal[string] ]= identifier[token]
keyword[if] identifier[self] . identifier[config] . identifier[topic] == literal[string] :
identifier[headers] [ literal[string] ]= identifier[self] . identifier[eventhub_client] . identifier[zone_id] + literal[string]
keyword[else] :
identifier[headers] [ literal[string] ]= identifier[self] . identifier[config] . identifier[topic]
keyword[if] identifier[self] . identifier[config] . identifier[publish_type] == identifier[self] . identifier[config] . identifier[Type] . identifier[SYNC] :
identifier[headers] [ literal[string] ]= literal[string]
keyword[else] :
identifier[headers] [ literal[string] ]= literal[string]
identifier[headers] [ literal[string] ]= identifier[str] ( identifier[self] . identifier[config] . identifier[async_cache_ack_interval_millis] )
identifier[headers] [ literal[string] ]= identifier[str] ( identifier[self] . identifier[config] . identifier[async_enable_acks] ). identifier[lower] ()
identifier[headers] [ literal[string] ]= identifier[str] ( identifier[self] . identifier[config] . identifier[async_enable_nacks_only] ). identifier[lower] ()
identifier[headers] [ literal[string] ]= identifier[str] ( identifier[self] . identifier[config] . identifier[async_cache_acks_and_nacks] ). identifier[lower] ()
keyword[return] identifier[headers] | def _generate_publish_headers(self):
"""
generate the headers for the connection to event hub service based on the provided config
:return: {} headers
"""
headers = {'predix-zone-id': self.eventhub_client.zone_id}
token = self.eventhub_client.service._get_bearer_token()
if self.config.is_grpc():
headers['authorization'] = token[token.index(' ') + 1:] # depends on [control=['if'], data=[]]
else:
headers['authorization'] = token
if self.config.topic == '':
headers['topic'] = self.eventhub_client.zone_id + '_topic' # depends on [control=['if'], data=[]]
else:
headers['topic'] = self.config.topic
if self.config.publish_type == self.config.Type.SYNC:
headers['sync-acks'] = 'true' # depends on [control=['if'], data=[]]
else:
headers['sync-acks'] = 'false'
headers['send-acks-interval'] = str(self.config.async_cache_ack_interval_millis)
headers['acks'] = str(self.config.async_enable_acks).lower()
headers['nacks'] = str(self.config.async_enable_nacks_only).lower()
headers['cache-acks'] = str(self.config.async_cache_acks_and_nacks).lower()
return headers |
def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement)) | def function[_check_error, parameter[response]]:
constant[Checks for JSON error messages and raises Python exception]
if compare[constant[error] in name[response]] begin[:]
<ast.Raise object at 0x7da18bcc97b0> | keyword[def] identifier[_check_error] ( identifier[response] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[response] :
keyword[raise] identifier[InfluxDBError] ( identifier[response] [ literal[string] ])
keyword[elif] literal[string] keyword[in] identifier[response] :
keyword[for] identifier[statement] keyword[in] identifier[response] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[statement] :
identifier[msg] = literal[string]
keyword[raise] identifier[InfluxDBError] ( identifier[msg] . identifier[format] ( identifier[d] = identifier[statement] )) | def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error']) # depends on [control=['if'], data=['response']]
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement)) # depends on [control=['if'], data=['statement']] # depends on [control=['for'], data=['statement']] # depends on [control=['if'], data=['response']] |
async def unplonk(self, ctx, *, member: discord.Member):
        """Unbans a user from using the bot.
        To use this command you must have the Manage Server permission
        or have a Bot Admin role.
        """
        plonks = self.config.get('plonks', {})
        guild_id = ctx.message.server.id
        banned = plonks.get(guild_id, [])
        if member.id not in banned:
            # Nothing to remove -- the member was never plonked here.
            await self.bot.responses.failure(message='%s is not banned from using the bot in this server.' % member)
            return
        banned.remove(member.id)
        # Re-assign so a freshly created list is stored under this server.
        plonks[guild_id] = banned
        await self.config.put('plonks', plonks)
        await self.bot.responses.success(message='%s has been unbanned from using the bot in this server.' % member)
literal[string]
identifier[plonks] = identifier[self] . identifier[config] . identifier[get] ( literal[string] ,{})
identifier[guild_id] = identifier[ctx] . identifier[message] . identifier[server] . identifier[id]
identifier[db] = identifier[plonks] . identifier[get] ( identifier[guild_id] ,[])
keyword[try] :
identifier[db] . identifier[remove] ( identifier[member] . identifier[id] )
keyword[except] identifier[ValueError] :
keyword[await] identifier[self] . identifier[bot] . identifier[responses] . identifier[failure] ( identifier[message] = literal[string] % identifier[member] )
keyword[else] :
identifier[plonks] [ identifier[guild_id] ]= identifier[db]
keyword[await] identifier[self] . identifier[config] . identifier[put] ( literal[string] , identifier[plonks] )
keyword[await] identifier[self] . identifier[bot] . identifier[responses] . identifier[success] ( identifier[message] = literal[string] % identifier[member] ) | async def unplonk(self, ctx, *, member: discord.Member):
"""Unbans a user from using the bot.
To use this command you must have the Manage Server permission
or have a Bot Admin role.
"""
plonks = self.config.get('plonks', {})
guild_id = ctx.message.server.id
db = plonks.get(guild_id, [])
try:
db.remove(member.id) # depends on [control=['try'], data=[]]
except ValueError:
await self.bot.responses.failure(message='%s is not banned from using the bot in this server.' % member) # depends on [control=['except'], data=[]]
else:
plonks[guild_id] = db
await self.config.put('plonks', plonks)
await self.bot.responses.success(message='%s has been unbanned from using the bot in this server.' % member) |
def _shape(self, df):
"""
Calculate table chape considering index levels.
"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels | def function[_shape, parameter[self, df]]:
constant[
Calculate table chape considering index levels.
]
<ast.Tuple object at 0x7da18dc9a140> assign[=] name[df].shape
return[tuple[[<ast.BinOp object at 0x7da18dc982b0>, <ast.BinOp object at 0x7da18dc99990>]]] | keyword[def] identifier[_shape] ( identifier[self] , identifier[df] ):
literal[string]
identifier[row] , identifier[col] = identifier[df] . identifier[shape]
keyword[return] identifier[row] + identifier[df] . identifier[columns] . identifier[nlevels] , identifier[col] + identifier[df] . identifier[index] . identifier[nlevels] | def _shape(self, df):
"""
Calculate table chape considering index levels.
"""
(row, col) = df.shape
return (row + df.columns.nlevels, col + df.index.nlevels) |
def get_project_by_network_id(network_id,**kwargs):
    """
    Get a project complexmodel by a network_id.

    :param network_id: ID of the network whose owning project is wanted
    :keyword user_id: the requesting user; used both to filter project
        ownership and for the read-permission check
    :return: the last readable matching project (projects are ordered by
        name), or None when no matching project is readable by the user
    """
    user_id = kwargs.get('user_id')
    projects_i = db.DBSession.query(Project).join(ProjectOwner).join(Network, Project.id==Network.project_id).filter(
                                        Network.id==network_id,
                                        ProjectOwner.user_id==user_id).order_by('name').all()

    ret_project = None
    for project_i in projects_i:
        try:
            project_i.check_read_permission(user_id)
            ret_project = project_i
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; permission failures are still just logged.
        except Exception:
            log.info("Can't return project %s. User %s does not have permission to read it.", project_i.id, user_id)

    return ret_project
constant[
get a project complexmodel by a network_id
]
variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]]
variable[projects_i] assign[=] call[call[call[call[call[call[name[db].DBSession.query, parameter[name[Project]]].join, parameter[name[ProjectOwner]]].join, parameter[name[Network], compare[name[Project].id equal[==] name[Network].project_id]]].filter, parameter[compare[name[Network].id equal[==] name[network_id]], compare[name[ProjectOwner].user_id equal[==] name[user_id]]]].order_by, parameter[constant[name]]].all, parameter[]]
variable[ret_project] assign[=] constant[None]
for taget[name[project_i]] in starred[name[projects_i]] begin[:]
<ast.Try object at 0x7da18f810070>
return[name[ret_project]] | keyword[def] identifier[get_project_by_network_id] ( identifier[network_id] ,** identifier[kwargs] ):
literal[string]
identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[projects_i] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Project] ). identifier[join] ( identifier[ProjectOwner] ). identifier[join] ( identifier[Network] , identifier[Project] . identifier[id] == identifier[Network] . identifier[project_id] ). identifier[filter] (
identifier[Network] . identifier[id] == identifier[network_id] ,
identifier[ProjectOwner] . identifier[user_id] == identifier[user_id] ). identifier[order_by] ( literal[string] ). identifier[all] ()
identifier[ret_project] = keyword[None]
keyword[for] identifier[project_i] keyword[in] identifier[projects_i] :
keyword[try] :
identifier[project_i] . identifier[check_read_permission] ( identifier[user_id] )
identifier[ret_project] = identifier[project_i]
keyword[except] :
identifier[log] . identifier[info] ( literal[string] , identifier[project_i] . identifier[id] , identifier[user_id] )
keyword[return] identifier[ret_project] | def get_project_by_network_id(network_id, **kwargs):
"""
get a project complexmodel by a network_id
"""
user_id = kwargs.get('user_id')
projects_i = db.DBSession.query(Project).join(ProjectOwner).join(Network, Project.id == Network.project_id).filter(Network.id == network_id, ProjectOwner.user_id == user_id).order_by('name').all()
ret_project = None
for project_i in projects_i:
try:
project_i.check_read_permission(user_id)
ret_project = project_i # depends on [control=['try'], data=[]]
except:
log.info("Can't return project %s. User %s does not have permission to read it.", project_i.id, user_id) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['project_i']]
return ret_project |
def ruamelindex(self, strictindex):
        """
        Map a strict parsed index back to its ruamel equivalent.

        For mappings the original key is looked up in ``key_association``
        (falling back to the strict index itself when no association exists),
        e.g. parsed-via-slugify -> Parsed via slugify. For non-mappings the
        strict index is returned unchanged, e.g. 0 -> 0, 1 -> 2.
        """
        if self.is_mapping():
            return self.key_association.get(strictindex, strictindex)
        return strictindex
constant[
Get the ruamel equivalent of a strict parsed index.
E.g. 0 -> 0, 1 -> 2, parsed-via-slugify -> Parsed via slugify
]
return[<ast.IfExp object at 0x7da20c6e7cd0>] | keyword[def] identifier[ruamelindex] ( identifier[self] , identifier[strictindex] ):
literal[string]
keyword[return] (
identifier[self] . identifier[key_association] . identifier[get] ( identifier[strictindex] , identifier[strictindex] )
keyword[if] identifier[self] . identifier[is_mapping] ()
keyword[else] identifier[strictindex]
) | def ruamelindex(self, strictindex):
"""
Get the ruamel equivalent of a strict parsed index.
E.g. 0 -> 0, 1 -> 2, parsed-via-slugify -> Parsed via slugify
"""
return self.key_association.get(strictindex, strictindex) if self.is_mapping() else strictindex |
def main(argv):
  """Validate a model's performance on a set of holdout data."""
  _, *validation_paths = argv
  if not FLAGS.expand_validation_dirs:
    tf_records = validation_paths
  else:
    # Each argument is a directory; expand to the .zz holdout files inside.
    with utils.logged_timer("Building lists of holdout files"):
      tf_records = [
          path
          for record_dir in validation_paths
          for path in gfile.Glob(os.path.join(record_dir, '*.zz'))
      ]
  if not tf_records:
    raise RuntimeError("Did not find any holdout files for validating!")
  validate(*tf_records)
constant[Validate a model's performance on a set of holdout data.]
<ast.Tuple object at 0x7da1b1b013c0> assign[=] name[argv]
if name[FLAGS].expand_validation_dirs begin[:]
variable[tf_records] assign[=] list[[]]
with call[name[utils].logged_timer, parameter[constant[Building lists of holdout files]]] begin[:]
for taget[name[record_dir]] in starred[name[validation_paths]] begin[:]
call[name[tf_records].extend, parameter[call[name[gfile].Glob, parameter[call[name[os].path.join, parameter[name[record_dir], constant[*.zz]]]]]]]
if <ast.UnaryOp object at 0x7da1b1b026e0> begin[:]
<ast.Raise object at 0x7da1b1b01270>
call[name[validate], parameter[<ast.Starred object at 0x7da1b1b00df0>]] | keyword[def] identifier[main] ( identifier[argv] ):
literal[string]
identifier[_] ,* identifier[validation_paths] = identifier[argv]
keyword[if] identifier[FLAGS] . identifier[expand_validation_dirs] :
identifier[tf_records] =[]
keyword[with] identifier[utils] . identifier[logged_timer] ( literal[string] ):
keyword[for] identifier[record_dir] keyword[in] identifier[validation_paths] :
identifier[tf_records] . identifier[extend] ( identifier[gfile] . identifier[Glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[record_dir] , literal[string] )))
keyword[else] :
identifier[tf_records] = identifier[validation_paths]
keyword[if] keyword[not] identifier[tf_records] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[validate] (* identifier[tf_records] ) | def main(argv):
"""Validate a model's performance on a set of holdout data."""
(_, *validation_paths) = argv
if FLAGS.expand_validation_dirs:
tf_records = []
with utils.logged_timer('Building lists of holdout files'):
for record_dir in validation_paths:
tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz'))) # depends on [control=['for'], data=['record_dir']] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
else:
tf_records = validation_paths
if not tf_records:
raise RuntimeError('Did not find any holdout files for validating!') # depends on [control=['if'], data=[]]
validate(*tf_records) |
def get_network_summary_dict(graph: BELGraph) -> Mapping:
    """Create a summary dictionary for a BEL graph.

    Aggregates several independent analyses of *graph* into one flat mapping:
    simple counters (functions, relations, authors, ...), BioGrammar pattern
    searches (regulatory/unstable/contradictory pairs and triplets, causal
    pathologies), definition-hygiene checks (undefined/unused namespaces and
    annotations), error summaries, and bibliometrics.

    :param graph: the BEL graph to summarize
    :return: a mapping from summary-field name to its computed value
    """
    return dict(
        # Counters
        function_count=count_functions(graph),
        modifications_count=get_modifications_count(graph),
        relation_count=count_relations(graph),
        authors_count=count_authors(graph).most_common(15),
        variants_count=count_variants(graph),
        namespaces_count=count_namespaces(graph),
        # Top-15 hub nodes keyed by preferred label: name, else identifier,
        # else the node's string form.
        hub_data={
            (
                node.name or node.identifier
                if NAME in node or IDENTIFIER in node else
                str(node)
            ): degree
            for node, degree in get_top_hubs(graph, n=15)
        },
        # Top-15 pathology nodes, labelled the same way as hub_data.
        disease_data={
            (
                node.name or node.identifier
                if NAME in node or IDENTIFIER in node else
                str(node)
            ): count
            for node, count in get_top_pathologies(graph, n=15)
        },
        # BioGrammar
        regulatory_pairs=[
            get_pair_tuple(u, v)
            for u, v in get_regulatory_pairs(graph)
        ],
        # Each pair/triplet is tagged with the name of the pattern it matched.
        unstable_pairs=list(itt.chain(
            (get_pair_tuple(u, v) + ('Chaotic',) for u, v, in get_chaotic_pairs(graph)),
            (get_pair_tuple(u, v) + ('Dampened',) for u, v, in get_dampened_pairs(graph)),
        )),
        contradictory_pairs=[
            get_pair_tuple(u, v) + (relation,)
            for u, v, relation in get_contradiction_summary(graph)
        ],
        contradictory_triplets=list(itt.chain(
            (get_triplet_tuple(a, b, c) + ('Separate',) for a, b, c in
             get_separate_unstable_correlation_triples(graph)),
            (get_triplet_tuple(a, b, c) + ('Mutual',) for a, b, c in get_mutually_unstable_correlation_triples(graph)),
            (get_triplet_tuple(a, b, c) + ('Jens',) for a, b, c in get_jens_unstable(graph)),
            (get_triplet_tuple(a, b, c) + ('Increase Mismatch',) for a, b, c in get_increase_mismatch_triplets(graph)),
            (get_triplet_tuple(a, b, c) + ('Decrease Mismatch',) for a, b, c in get_decrease_mismatch_triplets(graph)),
        )),
        unstable_triplets=list(itt.chain(
            (get_triplet_tuple(a, b, c) + ('Chaotic',) for a, b, c in get_chaotic_triplets(graph)),
            (get_triplet_tuple(a, b, c) + ('Dampened',) for a, b, c in get_dampened_triplets(graph)),
        )),
        # Causal edges whose source/target involve a pathology; the set
        # de-duplicates parallel edges before sorting.
        causal_pathologies=sorted({
            get_pair_tuple(u, v) + (graph[u][v][k][RELATION],)
            for u, v, k in filter_edges(graph, has_pathology_causal)
        }),
        # Misc.
        undefined_namespaces=get_undefined_namespaces(graph),
        undefined_annotations=get_undefined_annotations(graph),
        namespaces_with_incorrect_names=get_namespaces_with_incorrect_names(graph),
        unused_namespaces=get_unused_namespaces(graph),
        unused_annotations=get_unused_annotations(graph),
        unused_list_annotation_values=get_unused_list_annotation_values(graph),
        naked_names=get_naked_names(graph),
        error_count=count_error_types(graph),
        # Errors
        error_groups=get_most_common_errors(graph),
        syntax_errors=get_syntax_errors(graph),
        # Bibliometrics
        citation_years=get_citation_years(graph),
        confidence_count=count_confidences(graph),
    )
constant[Create a summary dictionary.]
return[call[name[dict], parameter[]]] | keyword[def] identifier[get_network_summary_dict] ( identifier[graph] : identifier[BELGraph] )-> identifier[Mapping] :
literal[string]
keyword[return] identifier[dict] (
identifier[function_count] = identifier[count_functions] ( identifier[graph] ),
identifier[modifications_count] = identifier[get_modifications_count] ( identifier[graph] ),
identifier[relation_count] = identifier[count_relations] ( identifier[graph] ),
identifier[authors_count] = identifier[count_authors] ( identifier[graph] ). identifier[most_common] ( literal[int] ),
identifier[variants_count] = identifier[count_variants] ( identifier[graph] ),
identifier[namespaces_count] = identifier[count_namespaces] ( identifier[graph] ),
identifier[hub_data] ={
(
identifier[node] . identifier[name] keyword[or] identifier[node] . identifier[identifier]
keyword[if] identifier[NAME] keyword[in] identifier[node] keyword[or] identifier[IDENTIFIER] keyword[in] identifier[node] keyword[else]
identifier[str] ( identifier[node] )
): identifier[degree]
keyword[for] identifier[node] , identifier[degree] keyword[in] identifier[get_top_hubs] ( identifier[graph] , identifier[n] = literal[int] )
},
identifier[disease_data] ={
(
identifier[node] . identifier[name] keyword[or] identifier[node] . identifier[identifier]
keyword[if] identifier[NAME] keyword[in] identifier[node] keyword[or] identifier[IDENTIFIER] keyword[in] identifier[node] keyword[else]
identifier[str] ( identifier[node] )
): identifier[count]
keyword[for] identifier[node] , identifier[count] keyword[in] identifier[get_top_pathologies] ( identifier[graph] , identifier[n] = literal[int] )
},
identifier[regulatory_pairs] =[
identifier[get_pair_tuple] ( identifier[u] , identifier[v] )
keyword[for] identifier[u] , identifier[v] keyword[in] identifier[get_regulatory_pairs] ( identifier[graph] )
],
identifier[unstable_pairs] = identifier[list] ( identifier[itt] . identifier[chain] (
( identifier[get_pair_tuple] ( identifier[u] , identifier[v] )+( literal[string] ,) keyword[for] identifier[u] , identifier[v] , keyword[in] identifier[get_chaotic_pairs] ( identifier[graph] )),
( identifier[get_pair_tuple] ( identifier[u] , identifier[v] )+( literal[string] ,) keyword[for] identifier[u] , identifier[v] , keyword[in] identifier[get_dampened_pairs] ( identifier[graph] )),
)),
identifier[contradictory_pairs] =[
identifier[get_pair_tuple] ( identifier[u] , identifier[v] )+( identifier[relation] ,)
keyword[for] identifier[u] , identifier[v] , identifier[relation] keyword[in] identifier[get_contradiction_summary] ( identifier[graph] )
],
identifier[contradictory_triplets] = identifier[list] ( identifier[itt] . identifier[chain] (
( identifier[get_triplet_tuple] ( identifier[a] , identifier[b] , identifier[c] )+( literal[string] ,) keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in]
identifier[get_separate_unstable_correlation_triples] ( identifier[graph] )),
( identifier[get_triplet_tuple] ( identifier[a] , identifier[b] , identifier[c] )+( literal[string] ,) keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in] identifier[get_mutually_unstable_correlation_triples] ( identifier[graph] )),
( identifier[get_triplet_tuple] ( identifier[a] , identifier[b] , identifier[c] )+( literal[string] ,) keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in] identifier[get_jens_unstable] ( identifier[graph] )),
( identifier[get_triplet_tuple] ( identifier[a] , identifier[b] , identifier[c] )+( literal[string] ,) keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in] identifier[get_increase_mismatch_triplets] ( identifier[graph] )),
( identifier[get_triplet_tuple] ( identifier[a] , identifier[b] , identifier[c] )+( literal[string] ,) keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in] identifier[get_decrease_mismatch_triplets] ( identifier[graph] )),
)),
identifier[unstable_triplets] = identifier[list] ( identifier[itt] . identifier[chain] (
( identifier[get_triplet_tuple] ( identifier[a] , identifier[b] , identifier[c] )+( literal[string] ,) keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in] identifier[get_chaotic_triplets] ( identifier[graph] )),
( identifier[get_triplet_tuple] ( identifier[a] , identifier[b] , identifier[c] )+( literal[string] ,) keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in] identifier[get_dampened_triplets] ( identifier[graph] )),
)),
identifier[causal_pathologies] = identifier[sorted] ({
identifier[get_pair_tuple] ( identifier[u] , identifier[v] )+( identifier[graph] [ identifier[u] ][ identifier[v] ][ identifier[k] ][ identifier[RELATION] ],)
keyword[for] identifier[u] , identifier[v] , identifier[k] keyword[in] identifier[filter_edges] ( identifier[graph] , identifier[has_pathology_causal] )
}),
identifier[undefined_namespaces] = identifier[get_undefined_namespaces] ( identifier[graph] ),
identifier[undefined_annotations] = identifier[get_undefined_annotations] ( identifier[graph] ),
identifier[namespaces_with_incorrect_names] = identifier[get_namespaces_with_incorrect_names] ( identifier[graph] ),
identifier[unused_namespaces] = identifier[get_unused_namespaces] ( identifier[graph] ),
identifier[unused_annotations] = identifier[get_unused_annotations] ( identifier[graph] ),
identifier[unused_list_annotation_values] = identifier[get_unused_list_annotation_values] ( identifier[graph] ),
identifier[naked_names] = identifier[get_naked_names] ( identifier[graph] ),
identifier[error_count] = identifier[count_error_types] ( identifier[graph] ),
identifier[error_groups] = identifier[get_most_common_errors] ( identifier[graph] ),
identifier[syntax_errors] = identifier[get_syntax_errors] ( identifier[graph] ),
identifier[citation_years] = identifier[get_citation_years] ( identifier[graph] ),
identifier[confidence_count] = identifier[count_confidences] ( identifier[graph] ),
) | def get_network_summary_dict(graph: BELGraph) -> Mapping:
"""Create a summary dictionary."""
# Counters
# BioGrammar
# Misc.
# Errors
# Bibliometrics
return dict(function_count=count_functions(graph), modifications_count=get_modifications_count(graph), relation_count=count_relations(graph), authors_count=count_authors(graph).most_common(15), variants_count=count_variants(graph), namespaces_count=count_namespaces(graph), hub_data={node.name or node.identifier if NAME in node or IDENTIFIER in node else str(node): degree for (node, degree) in get_top_hubs(graph, n=15)}, disease_data={node.name or node.identifier if NAME in node or IDENTIFIER in node else str(node): count for (node, count) in get_top_pathologies(graph, n=15)}, regulatory_pairs=[get_pair_tuple(u, v) for (u, v) in get_regulatory_pairs(graph)], unstable_pairs=list(itt.chain((get_pair_tuple(u, v) + ('Chaotic',) for (u, v) in get_chaotic_pairs(graph)), (get_pair_tuple(u, v) + ('Dampened',) for (u, v) in get_dampened_pairs(graph)))), contradictory_pairs=[get_pair_tuple(u, v) + (relation,) for (u, v, relation) in get_contradiction_summary(graph)], contradictory_triplets=list(itt.chain((get_triplet_tuple(a, b, c) + ('Separate',) for (a, b, c) in get_separate_unstable_correlation_triples(graph)), (get_triplet_tuple(a, b, c) + ('Mutual',) for (a, b, c) in get_mutually_unstable_correlation_triples(graph)), (get_triplet_tuple(a, b, c) + ('Jens',) for (a, b, c) in get_jens_unstable(graph)), (get_triplet_tuple(a, b, c) + ('Increase Mismatch',) for (a, b, c) in get_increase_mismatch_triplets(graph)), (get_triplet_tuple(a, b, c) + ('Decrease Mismatch',) for (a, b, c) in get_decrease_mismatch_triplets(graph)))), unstable_triplets=list(itt.chain((get_triplet_tuple(a, b, c) + ('Chaotic',) for (a, b, c) in get_chaotic_triplets(graph)), (get_triplet_tuple(a, b, c) + ('Dampened',) for (a, b, c) in get_dampened_triplets(graph)))), causal_pathologies=sorted({get_pair_tuple(u, v) + (graph[u][v][k][RELATION],) for (u, v, k) in filter_edges(graph, has_pathology_causal)}), undefined_namespaces=get_undefined_namespaces(graph), 
undefined_annotations=get_undefined_annotations(graph), namespaces_with_incorrect_names=get_namespaces_with_incorrect_names(graph), unused_namespaces=get_unused_namespaces(graph), unused_annotations=get_unused_annotations(graph), unused_list_annotation_values=get_unused_list_annotation_values(graph), naked_names=get_naked_names(graph), error_count=count_error_types(graph), error_groups=get_most_common_errors(graph), syntax_errors=get_syntax_errors(graph), citation_years=get_citation_years(graph), confidence_count=count_confidences(graph)) |
def purge(self):
    """Remove all of this stream's data and clear its calculated intervals.

    The stream definition itself is kept (``remove_definition=False``) and
    no sandbox is used.

    :return: None
    """
    # Delegate to the owning channel; only the data is purged here.
    channel = self.channel
    channel.purge_stream(self.stream_id, remove_definition=False, sandbox=None)
constant[
Purge the stream. This removes all data and clears the calculated intervals
:return: None
]
call[name[self].channel.purge_stream, parameter[name[self].stream_id]] | keyword[def] identifier[purge] ( identifier[self] ):
literal[string]
identifier[self] . identifier[channel] . identifier[purge_stream] ( identifier[self] . identifier[stream_id] , identifier[remove_definition] = keyword[False] , identifier[sandbox] = keyword[None] ) | def purge(self):
"""
Purge the stream. This removes all data and clears the calculated intervals
:return: None
"""
self.channel.purge_stream(self.stream_id, remove_definition=False, sandbox=None) |
def toProtocolElement(self):
    """
    Returns the GA4GH protocol (protobuf) representation of this ReadGroup.
    """
    # TODO: still incomplete -- several protocol fields are not yet
    # populated because the backing implementation does not provide them.
    gaReadGroup = protocol.ReadGroup()
    gaReadGroup.id = self.getId()
    gaReadGroup.created = self._creationTime
    gaReadGroup.updated = self._updateTime
    # The grandparent container of a ReadGroup is its dataset.
    gaReadGroup.dataset_id = \
        self.getParentContainer().getParentContainer().getId()
    gaReadGroup.name = self.getLocalId()
    gaReadGroup.predicted_insert_size = pb.int(self.getPredictedInsertSize())
    referenceSet = self._parentContainer.getReferenceSet()
    gaReadGroup.sample_name = pb.string(self.getSampleName())
    gaReadGroup.biosample_id = pb.string(self.getBiosampleId())
    # The reference set is optional; only set the id when one is attached.
    if referenceSet is not None:
        gaReadGroup.reference_set_id = referenceSet.getId()
    gaReadGroup.stats.CopyFrom(self.getStats())
    gaReadGroup.programs.extend(self.getPrograms())
    gaReadGroup.description = pb.string(self.getDescription())
    gaReadGroup.experiment.CopyFrom(self.getExperiment())
    self.serializeAttributes(gaReadGroup)
    return gaReadGroup
constant[
Returns the GA4GH protocol representation of this ReadGroup.
]
variable[readGroup] assign[=] call[name[protocol].ReadGroup, parameter[]]
name[readGroup].id assign[=] call[name[self].getId, parameter[]]
name[readGroup].created assign[=] name[self]._creationTime
name[readGroup].updated assign[=] name[self]._updateTime
variable[dataset] assign[=] call[call[name[self].getParentContainer, parameter[]].getParentContainer, parameter[]]
name[readGroup].dataset_id assign[=] call[name[dataset].getId, parameter[]]
name[readGroup].name assign[=] call[name[self].getLocalId, parameter[]]
name[readGroup].predicted_insert_size assign[=] call[name[pb].int, parameter[call[name[self].getPredictedInsertSize, parameter[]]]]
variable[referenceSet] assign[=] call[name[self]._parentContainer.getReferenceSet, parameter[]]
name[readGroup].sample_name assign[=] call[name[pb].string, parameter[call[name[self].getSampleName, parameter[]]]]
name[readGroup].biosample_id assign[=] call[name[pb].string, parameter[call[name[self].getBiosampleId, parameter[]]]]
if compare[name[referenceSet] is_not constant[None]] begin[:]
name[readGroup].reference_set_id assign[=] call[name[referenceSet].getId, parameter[]]
call[name[readGroup].stats.CopyFrom, parameter[call[name[self].getStats, parameter[]]]]
call[name[readGroup].programs.extend, parameter[call[name[self].getPrograms, parameter[]]]]
name[readGroup].description assign[=] call[name[pb].string, parameter[call[name[self].getDescription, parameter[]]]]
call[name[readGroup].experiment.CopyFrom, parameter[call[name[self].getExperiment, parameter[]]]]
call[name[self].serializeAttributes, parameter[name[readGroup]]]
return[name[readGroup]] | keyword[def] identifier[toProtocolElement] ( identifier[self] ):
literal[string]
identifier[readGroup] = identifier[protocol] . identifier[ReadGroup] ()
identifier[readGroup] . identifier[id] = identifier[self] . identifier[getId] ()
identifier[readGroup] . identifier[created] = identifier[self] . identifier[_creationTime]
identifier[readGroup] . identifier[updated] = identifier[self] . identifier[_updateTime]
identifier[dataset] = identifier[self] . identifier[getParentContainer] (). identifier[getParentContainer] ()
identifier[readGroup] . identifier[dataset_id] = identifier[dataset] . identifier[getId] ()
identifier[readGroup] . identifier[name] = identifier[self] . identifier[getLocalId] ()
identifier[readGroup] . identifier[predicted_insert_size] = identifier[pb] . identifier[int] ( identifier[self] . identifier[getPredictedInsertSize] ())
identifier[referenceSet] = identifier[self] . identifier[_parentContainer] . identifier[getReferenceSet] ()
identifier[readGroup] . identifier[sample_name] = identifier[pb] . identifier[string] ( identifier[self] . identifier[getSampleName] ())
identifier[readGroup] . identifier[biosample_id] = identifier[pb] . identifier[string] ( identifier[self] . identifier[getBiosampleId] ())
keyword[if] identifier[referenceSet] keyword[is] keyword[not] keyword[None] :
identifier[readGroup] . identifier[reference_set_id] = identifier[referenceSet] . identifier[getId] ()
identifier[readGroup] . identifier[stats] . identifier[CopyFrom] ( identifier[self] . identifier[getStats] ())
identifier[readGroup] . identifier[programs] . identifier[extend] ( identifier[self] . identifier[getPrograms] ())
identifier[readGroup] . identifier[description] = identifier[pb] . identifier[string] ( identifier[self] . identifier[getDescription] ())
identifier[readGroup] . identifier[experiment] . identifier[CopyFrom] ( identifier[self] . identifier[getExperiment] ())
identifier[self] . identifier[serializeAttributes] ( identifier[readGroup] )
keyword[return] identifier[readGroup] | def toProtocolElement(self):
"""
Returns the GA4GH protocol representation of this ReadGroup.
"""
# TODO this is very incomplete, but we don't have the
# implementation to fill out the rest of the fields currently
readGroup = protocol.ReadGroup()
readGroup.id = self.getId()
readGroup.created = self._creationTime
readGroup.updated = self._updateTime
dataset = self.getParentContainer().getParentContainer()
readGroup.dataset_id = dataset.getId()
readGroup.name = self.getLocalId()
readGroup.predicted_insert_size = pb.int(self.getPredictedInsertSize())
referenceSet = self._parentContainer.getReferenceSet()
readGroup.sample_name = pb.string(self.getSampleName())
readGroup.biosample_id = pb.string(self.getBiosampleId())
if referenceSet is not None:
readGroup.reference_set_id = referenceSet.getId() # depends on [control=['if'], data=['referenceSet']]
readGroup.stats.CopyFrom(self.getStats())
readGroup.programs.extend(self.getPrograms())
readGroup.description = pb.string(self.getDescription())
readGroup.experiment.CopyFrom(self.getExperiment())
self.serializeAttributes(readGroup)
return readGroup |
def analyze(data, normalize=None, reduce=None, ndims=None, align=None, internal=False):
    """
    Run the normalize -> reduce -> align transformation pipeline.

    Parameters
    ----------
    data : numpy array, pandas df, or list of arrays/dfs
        The data to analyze
    normalize : str or False or None
        If set to 'across', the columns of the input data will be z-scored
        across lists (default). That is, the z-scores will be computed with
        with respect to column n across all arrays passed in the list. If set
        to 'within', the columns will be z-scored within each list that is
        passed. If set to 'row', each row of the input data will be z-scored.
        If set to False, the input data will be returned with no z-scoring.
    reduce : str or dict
        Decomposition/manifold learning model to use. Models supported: PCA,
        IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
        FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
        TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
        passed as a string, but for finer control of the model parameters, pass
        as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
        See scikit-learn specific model docs for details on parameters supported
        for each model.
    ndims : int
        Number of dimensions to reduce
    align : str or dict
        If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
        hyperalignment. If 'SRM', alignment algorithm will be shared response
        model. You can also pass a dictionary for finer control, where the 'model'
        key is a string that specifies the model and the params key is a dictionary
        of parameter values (default : 'hyper').
    Returns
    ----------
    analyzed_data : list of numpy arrays
        The processed data
    """
    # Apply each stage in turn; intermediate names make the pipeline explicit.
    normalized = normalizer(data, normalize=normalize, internal=internal)
    reduced = reducer(normalized, reduce=reduce, ndims=ndims, internal=internal)
    return aligner(reduced, align=align)
constant[
Wrapper function for normalize -> reduce -> align transformations.
Parameters
----------
data : numpy array, pandas df, or list of arrays/dfs
The data to analyze
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
Number of dimensions to reduce
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
Returns
----------
analyzed_data : list of numpy arrays
The processed data
]
return[call[name[aligner], parameter[call[name[reducer], parameter[call[name[normalizer], parameter[name[data]]]]]]]] | keyword[def] identifier[analyze] ( identifier[data] , identifier[normalize] = keyword[None] , identifier[reduce] = keyword[None] , identifier[ndims] = keyword[None] , identifier[align] = keyword[None] , identifier[internal] = keyword[False] ):
literal[string]
keyword[return] identifier[aligner] ( identifier[reducer] ( identifier[normalizer] ( identifier[data] , identifier[normalize] = identifier[normalize] , identifier[internal] = identifier[internal] ),
identifier[reduce] = identifier[reduce] , identifier[ndims] = identifier[ndims] , identifier[internal] = identifier[internal] ), identifier[align] = identifier[align] ) | def analyze(data, normalize=None, reduce=None, ndims=None, align=None, internal=False):
"""
Wrapper function for normalize -> reduce -> align transformations.
Parameters
----------
data : numpy array, pandas df, or list of arrays/dfs
The data to analyze
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
Number of dimensions to reduce
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
Returns
----------
analyzed_data : list of numpy arrays
The processed data
"""
# return processed data
return aligner(reducer(normalizer(data, normalize=normalize, internal=internal), reduce=reduce, ndims=ndims, internal=internal), align=align) |
def diff_custom_calendar_timedeltas(start, end, freq):
    """
    Compute the difference between two pd.Timestamp values, counting only the
    days defined by a custom frequency. This is used to deal with custom
    calendars, such as a trading calendar.

    Parameters
    ----------
    start : pd.Timestamp
    end : pd.Timestamp
    freq : pd.DateOffset (CustomBusinessDay, Day or BusinessDay)
        Frequency whose calendar (weekmask/holidays) defines which days count
        toward the difference (see infer_trading_calendar).

    Returns
    -------
    pd.Timedelta
        end - start, with off-calendar days removed

    Raises
    ------
    ValueError
        If ``freq`` is not a Day, BusinessDay or CustomBusinessDay offset.
    """
    if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):
        raise ValueError("freq must be Day, BusinessDay or CustomBusinessDay")
    weekmask = getattr(freq, 'weekmask', None)
    holidays = getattr(freq, 'holidays', None)
    if weekmask is None and holidays is None:
        # Day/BusinessDay offsets carry no explicit calendar attributes;
        # substitute the calendar implied by the offset type.
        if isinstance(freq, Day):
            weekmask = 'Mon Tue Wed Thu Fri Sat Sun'
            holidays = []
        elif isinstance(freq, BusinessDay):
            weekmask = 'Mon Tue Wed Thu Fri'
            holidays = []
    if weekmask is not None and holidays is not None:
        # we prefer this method as it is faster
        actual_days = np.busday_count(np.array(start).astype('datetime64[D]'),
                                      np.array(end).astype('datetime64[D]'),
                                      weekmask, holidays)
    else:
        # default, it is slow
        actual_days = pd.date_range(start, end, freq=freq).shape[0] - 1
        # `onOffset` was renamed `is_on_offset` in pandas 1.1 and removed in
        # pandas 2.0; support both spellings.
        on_offset = getattr(freq, 'is_on_offset', None) or freq.onOffset
        if not on_offset(start):
            actual_days -= 1
    timediff = end - start
    # Subtract the off-calendar days from the raw calendar-day difference.
    delta_days = timediff.components.days - actual_days
    return timediff - pd.Timedelta(days=delta_days)
constant[
Compute the difference between two pd.Timedelta taking into consideration
custom frequency, which is used to deal with custom calendars, such as a
trading calendar
Parameters
----------
start : pd.Timestamp
end : pd.Timestamp
freq : CustomBusinessDay (see infer_trading_calendar)
freq : pd.DataOffset (CustomBusinessDay, Day or BDay)
Returns
-------
pd.Timedelta
end - start
]
if <ast.UnaryOp object at 0x7da2054a4e50> begin[:]
<ast.Raise object at 0x7da2054a4610>
variable[weekmask] assign[=] call[name[getattr], parameter[name[freq], constant[weekmask], constant[None]]]
variable[holidays] assign[=] call[name[getattr], parameter[name[freq], constant[holidays], constant[None]]]
if <ast.BoolOp object at 0x7da2054a5060> begin[:]
if call[name[isinstance], parameter[name[freq], name[Day]]] begin[:]
variable[weekmask] assign[=] constant[Mon Tue Wed Thu Fri Sat Sun]
variable[holidays] assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c76e830> begin[:]
variable[actual_days] assign[=] call[name[np].busday_count, parameter[call[call[name[np].array, parameter[name[start]]].astype, parameter[constant[datetime64[D]]]], call[call[name[np].array, parameter[name[end]]].astype, parameter[constant[datetime64[D]]]], name[weekmask], name[holidays]]]
variable[timediff] assign[=] binary_operation[name[end] - name[start]]
variable[delta_days] assign[=] binary_operation[name[timediff].components.days - name[actual_days]]
return[binary_operation[name[timediff] - call[name[pd].Timedelta, parameter[]]]] | keyword[def] identifier[diff_custom_calendar_timedeltas] ( identifier[start] , identifier[end] , identifier[freq] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[freq] ,( identifier[Day] , identifier[BusinessDay] , identifier[CustomBusinessDay] )):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[weekmask] = identifier[getattr] ( identifier[freq] , literal[string] , keyword[None] )
identifier[holidays] = identifier[getattr] ( identifier[freq] , literal[string] , keyword[None] )
keyword[if] identifier[weekmask] keyword[is] keyword[None] keyword[and] identifier[holidays] keyword[is] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[freq] , identifier[Day] ):
identifier[weekmask] = literal[string]
identifier[holidays] =[]
keyword[elif] identifier[isinstance] ( identifier[freq] , identifier[BusinessDay] ):
identifier[weekmask] = literal[string]
identifier[holidays] =[]
keyword[if] identifier[weekmask] keyword[is] keyword[not] keyword[None] keyword[and] identifier[holidays] keyword[is] keyword[not] keyword[None] :
identifier[actual_days] = identifier[np] . identifier[busday_count] ( identifier[np] . identifier[array] ( identifier[start] ). identifier[astype] ( literal[string] ),
identifier[np] . identifier[array] ( identifier[end] ). identifier[astype] ( literal[string] ),
identifier[weekmask] , identifier[holidays] )
keyword[else] :
identifier[actual_days] = identifier[pd] . identifier[date_range] ( identifier[start] , identifier[end] , identifier[freq] = identifier[freq] ). identifier[shape] [ literal[int] ]- literal[int]
keyword[if] keyword[not] identifier[freq] . identifier[onOffset] ( identifier[start] ):
identifier[actual_days] -= literal[int]
identifier[timediff] = identifier[end] - identifier[start]
identifier[delta_days] = identifier[timediff] . identifier[components] . identifier[days] - identifier[actual_days]
keyword[return] identifier[timediff] - identifier[pd] . identifier[Timedelta] ( identifier[days] = identifier[delta_days] ) | def diff_custom_calendar_timedeltas(start, end, freq):
"""
Compute the difference between two pd.Timedelta taking into consideration
custom frequency, which is used to deal with custom calendars, such as a
trading calendar
Parameters
----------
start : pd.Timestamp
end : pd.Timestamp
freq : CustomBusinessDay (see infer_trading_calendar)
freq : pd.DataOffset (CustomBusinessDay, Day or BDay)
Returns
-------
pd.Timedelta
end - start
"""
if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):
raise ValueError('freq must be Day, BusinessDay or CustomBusinessDay') # depends on [control=['if'], data=[]]
weekmask = getattr(freq, 'weekmask', None)
holidays = getattr(freq, 'holidays', None)
if weekmask is None and holidays is None:
if isinstance(freq, Day):
weekmask = 'Mon Tue Wed Thu Fri Sat Sun'
holidays = [] # depends on [control=['if'], data=[]]
elif isinstance(freq, BusinessDay):
weekmask = 'Mon Tue Wed Thu Fri'
holidays = [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if weekmask is not None and holidays is not None:
# we prefer this method as it is faster
actual_days = np.busday_count(np.array(start).astype('datetime64[D]'), np.array(end).astype('datetime64[D]'), weekmask, holidays) # depends on [control=['if'], data=[]]
else:
# default, it is slow
actual_days = pd.date_range(start, end, freq=freq).shape[0] - 1
if not freq.onOffset(start):
actual_days -= 1 # depends on [control=['if'], data=[]]
timediff = end - start
delta_days = timediff.components.days - actual_days
return timediff - pd.Timedelta(days=delta_days) |
def to_gremlin(self):
    """Return a unicode string with this expression rendered as Gremlin."""
    self.validate()
    mark_name, field_name = self.location.get_location_name()
    if field_name is None:
        template = u'm.{mark_name}'
    else:
        validate_safe_string(field_name)
        # Field names containing '@' need bracket (item) access in Gremlin.
        if '@' in field_name:
            template = u"m.{mark_name}['{field_name}']"
        else:
            template = u'm.{mark_name}.{field_name}'
    validate_safe_string(mark_name)
    return template.format(mark_name=mark_name, field_name=field_name)
constant[Return a unicode object with the Gremlin representation of this expression.]
call[name[self].validate, parameter[]]
<ast.Tuple object at 0x7da1b17cde70> assign[=] call[name[self].location.get_location_name, parameter[]]
if compare[name[field_name] is_not constant[None]] begin[:]
call[name[validate_safe_string], parameter[name[field_name]]]
if compare[constant[@] in name[field_name]] begin[:]
variable[template] assign[=] constant[m.{mark_name}['{field_name}']]
call[name[validate_safe_string], parameter[name[mark_name]]]
return[call[name[template].format, parameter[]]] | keyword[def] identifier[to_gremlin] ( identifier[self] ):
literal[string]
identifier[self] . identifier[validate] ()
identifier[mark_name] , identifier[field_name] = identifier[self] . identifier[location] . identifier[get_location_name] ()
keyword[if] identifier[field_name] keyword[is] keyword[not] keyword[None] :
identifier[validate_safe_string] ( identifier[field_name] )
keyword[if] literal[string] keyword[in] identifier[field_name] :
identifier[template] = literal[string]
keyword[else] :
identifier[template] = literal[string]
keyword[else] :
identifier[template] = literal[string]
identifier[validate_safe_string] ( identifier[mark_name] )
keyword[return] identifier[template] . identifier[format] ( identifier[mark_name] = identifier[mark_name] , identifier[field_name] = identifier[field_name] ) | def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this expression."""
self.validate()
(mark_name, field_name) = self.location.get_location_name()
if field_name is not None:
validate_safe_string(field_name)
if '@' in field_name:
template = u"m.{mark_name}['{field_name}']" # depends on [control=['if'], data=[]]
else:
template = u'm.{mark_name}.{field_name}' # depends on [control=['if'], data=['field_name']]
else:
template = u'm.{mark_name}'
validate_safe_string(mark_name)
return template.format(mark_name=mark_name, field_name=field_name) |
def set_copy_mode(self, use_copy: bool):
    """
    Put every protocol in (or out of) copy mode, so each one returns a copy
    of its protocol. This is used for writable mode in CFC.
    :param use_copy:
    :return:
    """
    # Flatten the two-level tree (groups -> protocols) and set the flag.
    all_protocols = [proto
                     for group in self.rootItem.children
                     for proto in group.children]
    for proto in all_protocols:
        proto.copy_data = use_copy
constant[
Set all protocols in copy mode. They will return a copy of their protocol.
This is used for writable mode in CFC.
:param use_copy:
:return:
]
for taget[name[group]] in starred[name[self].rootItem.children] begin[:]
for taget[name[proto]] in starred[name[group].children] begin[:]
name[proto].copy_data assign[=] name[use_copy] | keyword[def] identifier[set_copy_mode] ( identifier[self] , identifier[use_copy] : identifier[bool] ):
literal[string]
keyword[for] identifier[group] keyword[in] identifier[self] . identifier[rootItem] . identifier[children] :
keyword[for] identifier[proto] keyword[in] identifier[group] . identifier[children] :
identifier[proto] . identifier[copy_data] = identifier[use_copy] | def set_copy_mode(self, use_copy: bool):
"""
Set all protocols in copy mode. They will return a copy of their protocol.
This is used for writable mode in CFC.
:param use_copy:
:return:
"""
for group in self.rootItem.children:
for proto in group.children:
proto.copy_data = use_copy # depends on [control=['for'], data=['proto']] # depends on [control=['for'], data=['group']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.