| code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k) |
|---|---|---|---|
def plot_phens_blits(phen_grid, patches, **kwargs):
"""
A version of plot_phens designed to be used in animations. Takes a 2D array
of phenotypes and a list of matplotlib patch objects that have already
been added to the current axes and recolors the patches based on the array.
"""
denom, palette = get_kwargs(phen_grid, kwargs)
grid = color_grid(phen_grid, palette, denom)
for i in range(len(grid)):
for j in range(len(grid[i])):
curr_patch = patches[i * len(grid[i]) + j]
if grid[i][j] == -1:
curr_patch.set_visible(False)
else:
curr_patch.set_facecolor(grid[i][j])
curr_patch.set_visible(True)
return patches
|
def function[plot_phens_blits, parameter[phen_grid, patches]]:
constant[
A version of plot_phens designed to be used in animations. Takes a 2D array
of phenotypes and a list of matplotlib patch objects that have already
been added to the current axes and recolors the patches based on the array.
]
<ast.Tuple object at 0x7da1b14554b0> assign[=] call[name[get_kwargs], parameter[name[phen_grid], name[kwargs]]]
variable[grid] assign[=] call[name[color_grid], parameter[name[phen_grid], name[palette], name[denom]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[grid]]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[grid]][name[i]]]]]]] begin[:]
variable[curr_patch] assign[=] call[name[patches]][binary_operation[binary_operation[name[i] * call[name[len], parameter[call[name[grid]][name[i]]]]] + name[j]]]
if compare[call[call[name[grid]][name[i]]][name[j]] equal[==] <ast.UnaryOp object at 0x7da1b1454310>] begin[:]
call[name[curr_patch].set_visible, parameter[constant[False]]]
return[name[patches]]
|
keyword[def] identifier[plot_phens_blits] ( identifier[phen_grid] , identifier[patches] ,** identifier[kwargs] ):
literal[string]
identifier[denom] , identifier[palette] = identifier[get_kwargs] ( identifier[phen_grid] , identifier[kwargs] )
identifier[grid] = identifier[color_grid] ( identifier[phen_grid] , identifier[palette] , identifier[denom] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[grid] )):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[grid] [ identifier[i] ])):
identifier[curr_patch] = identifier[patches] [ identifier[i] * identifier[len] ( identifier[grid] [ identifier[i] ])+ identifier[j] ]
keyword[if] identifier[grid] [ identifier[i] ][ identifier[j] ]==- literal[int] :
identifier[curr_patch] . identifier[set_visible] ( keyword[False] )
keyword[else] :
identifier[curr_patch] . identifier[set_facecolor] ( identifier[grid] [ identifier[i] ][ identifier[j] ])
identifier[curr_patch] . identifier[set_visible] ( keyword[True] )
keyword[return] identifier[patches]
|
def plot_phens_blits(phen_grid, patches, **kwargs):
"""
A version of plot_phens designed to be used in animations. Takes a 2D array
of phenotypes and a list of matplotlib patch objects that have already
been added to the current axes and recolors the patches based on the array.
"""
(denom, palette) = get_kwargs(phen_grid, kwargs)
grid = color_grid(phen_grid, palette, denom)
for i in range(len(grid)):
for j in range(len(grid[i])):
curr_patch = patches[i * len(grid[i]) + j]
if grid[i][j] == -1:
curr_patch.set_visible(False) # depends on [control=['if'], data=[]]
else:
curr_patch.set_facecolor(grid[i][j])
curr_patch.set_visible(True) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
return patches
|
def log_response(_, _request, response, *_args, **kwargs):
# type: (Any, ClientRequest, ClientResponse, str, Any) -> Optional[ClientResponse]
"""Log a server response.
:param _: Unused in current version (will be None)
    :param requests.Request _request: The request object.
:param requests.Response response: The response object.
"""
if not _LOGGER.isEnabledFor(logging.DEBUG):
return None
try:
_LOGGER.debug("Response status: %r", response.status_code)
_LOGGER.debug("Response headers:")
for res_header, value in response.headers.items():
_LOGGER.debug(" %r: %r", res_header, value)
# We don't want to log binary data if the response is a file.
_LOGGER.debug("Response content:")
pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
header = response.headers.get('content-disposition')
if header and pattern.match(header):
filename = header.partition('=')[2]
_LOGGER.debug("File attachments: %s", filename)
elif response.headers.get("content-type", "").endswith("octet-stream"):
_LOGGER.debug("Body contains binary data.")
elif response.headers.get("content-type", "").startswith("image"):
_LOGGER.debug("Body contains image data.")
else:
if kwargs.get('stream', False):
_LOGGER.debug("Body is streamable")
else:
_LOGGER.debug(response.text())
return response
except Exception as err: # pylint: disable=broad-except
_LOGGER.debug("Failed to log response: %s", repr(err))
return response
|
def function[log_response, parameter[_, _request, response]]:
constant[Log a server response.
:param _: Unused in current version (will be None)
    :param requests.Request _request: The request object.
:param requests.Response response: The response object.
]
if <ast.UnaryOp object at 0x7da18dc07c40> begin[:]
return[constant[None]]
<ast.Try object at 0x7da18dc04700>
|
keyword[def] identifier[log_response] ( identifier[_] , identifier[_request] , identifier[response] ,* identifier[_args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[_LOGGER] . identifier[isEnabledFor] ( identifier[logging] . identifier[DEBUG] ):
keyword[return] keyword[None]
keyword[try] :
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[response] . identifier[status_code] )
identifier[_LOGGER] . identifier[debug] ( literal[string] )
keyword[for] identifier[res_header] , identifier[value] keyword[in] identifier[response] . identifier[headers] . identifier[items] ():
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[res_header] , identifier[value] )
identifier[_LOGGER] . identifier[debug] ( literal[string] )
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[IGNORECASE] )
identifier[header] = identifier[response] . identifier[headers] . identifier[get] ( literal[string] )
keyword[if] identifier[header] keyword[and] identifier[pattern] . identifier[match] ( identifier[header] ):
identifier[filename] = identifier[header] . identifier[partition] ( literal[string] )[ literal[int] ]
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[filename] )
keyword[elif] identifier[response] . identifier[headers] . identifier[get] ( literal[string] , literal[string] ). identifier[endswith] ( literal[string] ):
identifier[_LOGGER] . identifier[debug] ( literal[string] )
keyword[elif] identifier[response] . identifier[headers] . identifier[get] ( literal[string] , literal[string] ). identifier[startswith] ( literal[string] ):
identifier[_LOGGER] . identifier[debug] ( literal[string] )
keyword[else] :
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[_LOGGER] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[_LOGGER] . identifier[debug] ( identifier[response] . identifier[text] ())
keyword[return] identifier[response]
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[repr] ( identifier[err] ))
keyword[return] identifier[response]
|
def log_response(_, _request, response, *_args, **kwargs):
# type: (Any, ClientRequest, ClientResponse, str, Any) -> Optional[ClientResponse]
    'Log a server response.\n\n    :param _: Unused in current version (will be None)\n    :param requests.Request _request: The request object.\n    :param requests.Response response: The response object.\n    '
if not _LOGGER.isEnabledFor(logging.DEBUG):
return None # depends on [control=['if'], data=[]]
try:
_LOGGER.debug('Response status: %r', response.status_code)
_LOGGER.debug('Response headers:')
for (res_header, value) in response.headers.items():
_LOGGER.debug(' %r: %r', res_header, value) # depends on [control=['for'], data=[]]
# We don't want to log binary data if the response is a file.
_LOGGER.debug('Response content:')
pattern = re.compile('attachment; ?filename=["\\w.]+', re.IGNORECASE)
header = response.headers.get('content-disposition')
if header and pattern.match(header):
filename = header.partition('=')[2]
_LOGGER.debug('File attachments: %s', filename) # depends on [control=['if'], data=[]]
elif response.headers.get('content-type', '').endswith('octet-stream'):
_LOGGER.debug('Body contains binary data.') # depends on [control=['if'], data=[]]
elif response.headers.get('content-type', '').startswith('image'):
_LOGGER.debug('Body contains image data.') # depends on [control=['if'], data=[]]
elif kwargs.get('stream', False):
_LOGGER.debug('Body is streamable') # depends on [control=['if'], data=[]]
else:
_LOGGER.debug(response.text())
return response # depends on [control=['try'], data=[]]
except Exception as err: # pylint: disable=broad-except
_LOGGER.debug('Failed to log response: %s', repr(err))
return response # depends on [control=['except'], data=['err']]
|
def calculate_crop_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C, H, W] ---> [N, C, H', W']
2. [N, C, H, W], shape-ref [N', C', H', W'] ---> [N, C, H', W']
'''
check_input_and_output_numbers(operator, input_count_range=[1, 2], output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
output_shape = copy.deepcopy(operator.inputs[0].type.shape)
params = operator.raw_operator.crop
if len(operator.inputs) == 1:
if len(params.cropAmounts.borderAmounts) > 0:
output_shape[2] -= params.cropAmounts.borderAmounts[0].startEdgeSize
output_shape[2] -= params.cropAmounts.borderAmounts[0].endEdgeSize
output_shape[3] -= params.cropAmounts.borderAmounts[1].startEdgeSize
output_shape[3] -= params.cropAmounts.borderAmounts[1].endEdgeSize
elif len(operator.inputs) == 2:
output_shape[2] = operator.inputs[1].type.shape[2]
output_shape[3] = operator.inputs[1].type.shape[3]
else:
raise RuntimeError('Too many inputs for Crop operator')
operator.outputs[0].type.shape = output_shape
|
def function[calculate_crop_output_shapes, parameter[operator]]:
constant[
Allowed input/output patterns are
1. [N, C, H, W] ---> [N, C, H', W']
2. [N, C, H, W], shape-ref [N', C', H', W'] ---> [N, C, H', W']
]
call[name[check_input_and_output_numbers], parameter[name[operator]]]
call[name[check_input_and_output_types], parameter[name[operator]]]
variable[output_shape] assign[=] call[name[copy].deepcopy, parameter[call[name[operator].inputs][constant[0]].type.shape]]
variable[params] assign[=] name[operator].raw_operator.crop
if compare[call[name[len], parameter[name[operator].inputs]] equal[==] constant[1]] begin[:]
if compare[call[name[len], parameter[name[params].cropAmounts.borderAmounts]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b1e75570>
<ast.AugAssign object at 0x7da1b1e75750>
<ast.AugAssign object at 0x7da1b1e75930>
<ast.AugAssign object at 0x7da1b1e75fc0>
call[name[operator].outputs][constant[0]].type.shape assign[=] name[output_shape]
|
keyword[def] identifier[calculate_crop_output_shapes] ( identifier[operator] ):
literal[string]
identifier[check_input_and_output_numbers] ( identifier[operator] , identifier[input_count_range] =[ literal[int] , literal[int] ], identifier[output_count_range] = literal[int] )
identifier[check_input_and_output_types] ( identifier[operator] , identifier[good_input_types] =[ identifier[FloatTensorType] ])
identifier[output_shape] = identifier[copy] . identifier[deepcopy] ( identifier[operator] . identifier[inputs] [ literal[int] ]. identifier[type] . identifier[shape] )
identifier[params] = identifier[operator] . identifier[raw_operator] . identifier[crop]
keyword[if] identifier[len] ( identifier[operator] . identifier[inputs] )== literal[int] :
keyword[if] identifier[len] ( identifier[params] . identifier[cropAmounts] . identifier[borderAmounts] )> literal[int] :
identifier[output_shape] [ literal[int] ]-= identifier[params] . identifier[cropAmounts] . identifier[borderAmounts] [ literal[int] ]. identifier[startEdgeSize]
identifier[output_shape] [ literal[int] ]-= identifier[params] . identifier[cropAmounts] . identifier[borderAmounts] [ literal[int] ]. identifier[endEdgeSize]
identifier[output_shape] [ literal[int] ]-= identifier[params] . identifier[cropAmounts] . identifier[borderAmounts] [ literal[int] ]. identifier[startEdgeSize]
identifier[output_shape] [ literal[int] ]-= identifier[params] . identifier[cropAmounts] . identifier[borderAmounts] [ literal[int] ]. identifier[endEdgeSize]
keyword[elif] identifier[len] ( identifier[operator] . identifier[inputs] )== literal[int] :
identifier[output_shape] [ literal[int] ]= identifier[operator] . identifier[inputs] [ literal[int] ]. identifier[type] . identifier[shape] [ literal[int] ]
identifier[output_shape] [ literal[int] ]= identifier[operator] . identifier[inputs] [ literal[int] ]. identifier[type] . identifier[shape] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[operator] . identifier[outputs] [ literal[int] ]. identifier[type] . identifier[shape] = identifier[output_shape]
|
def calculate_crop_output_shapes(operator):
"""
Allowed input/output patterns are
1. [N, C, H, W] ---> [N, C, H', W']
2. [N, C, H, W], shape-ref [N', C', H', W'] ---> [N, C, H', W']
"""
check_input_and_output_numbers(operator, input_count_range=[1, 2], output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
output_shape = copy.deepcopy(operator.inputs[0].type.shape)
params = operator.raw_operator.crop
if len(operator.inputs) == 1:
if len(params.cropAmounts.borderAmounts) > 0:
output_shape[2] -= params.cropAmounts.borderAmounts[0].startEdgeSize
output_shape[2] -= params.cropAmounts.borderAmounts[0].endEdgeSize
output_shape[3] -= params.cropAmounts.borderAmounts[1].startEdgeSize
output_shape[3] -= params.cropAmounts.borderAmounts[1].endEdgeSize # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif len(operator.inputs) == 2:
output_shape[2] = operator.inputs[1].type.shape[2]
output_shape[3] = operator.inputs[1].type.shape[3] # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Too many inputs for Crop operator')
operator.outputs[0].type.shape = output_shape
|
def to_xdr_object(self):
"""Creates an XDR Memo object for a transaction with MEMO_HASH."""
return Xdr.types.Memo(type=Xdr.const.MEMO_HASH, hash=self.memo_hash)
|
def function[to_xdr_object, parameter[self]]:
constant[Creates an XDR Memo object for a transaction with MEMO_HASH.]
return[call[name[Xdr].types.Memo, parameter[]]]
|
keyword[def] identifier[to_xdr_object] ( identifier[self] ):
literal[string]
keyword[return] identifier[Xdr] . identifier[types] . identifier[Memo] ( identifier[type] = identifier[Xdr] . identifier[const] . identifier[MEMO_HASH] , identifier[hash] = identifier[self] . identifier[memo_hash] )
|
def to_xdr_object(self):
"""Creates an XDR Memo object for a transaction with MEMO_HASH."""
return Xdr.types.Memo(type=Xdr.const.MEMO_HASH, hash=self.memo_hash)
|
def add_real_directory(self, source_path, read_only=True, lazy_read=True,
target_path=None):
"""Create a fake directory corresponding to the real directory at the
specified path. Add entries in the fake directory corresponding to
the entries in the real directory.
Args:
source_path: The path to the existing directory.
read_only: If set, all files under the directory are treated as
read-only, e.g. a write access raises an exception;
otherwise, writing to the files changes the fake files only
                as usual.
lazy_read: If set (default), directory contents are only read when
accessed, and only until the needed subdirectory level.
.. note:: This means that the file system size is only updated
at the time the directory contents are read; set this to
`False` only if you are dependent on accurate file system
size in your test
target_path: If given, the target directory, otherwise,
the target directory is the same as `source_path`.
Returns:
the newly created FakeDirectory object.
Raises:
OSError: if the directory does not exist in the real file system.
IOError: if the directory already exists in the fake file system.
"""
source_path = self._path_without_trailing_separators(source_path)
if not os.path.exists(source_path):
self.raise_io_error(errno.ENOENT, source_path)
target_path = target_path or source_path
if lazy_read:
parent_path = os.path.split(target_path)[0]
if self.exists(parent_path):
parent_dir = self.get_object(parent_path)
else:
parent_dir = self.create_dir(parent_path)
new_dir = FakeDirectoryFromRealDirectory(
source_path, self, read_only, target_path)
parent_dir.add_entry(new_dir)
self._last_ino += 1
new_dir.st_ino = self._last_ino
else:
new_dir = self.create_dir(target_path)
for base, _, files in os.walk(source_path):
new_base = os.path.join(new_dir.path,
os.path.relpath(base, source_path))
for fileEntry in files:
self.add_real_file(os.path.join(base, fileEntry),
read_only,
os.path.join(new_base, fileEntry))
return new_dir
|
def function[add_real_directory, parameter[self, source_path, read_only, lazy_read, target_path]]:
constant[Create a fake directory corresponding to the real directory at the
specified path. Add entries in the fake directory corresponding to
the entries in the real directory.
Args:
source_path: The path to the existing directory.
read_only: If set, all files under the directory are treated as
read-only, e.g. a write access raises an exception;
otherwise, writing to the files changes the fake files only
                as usual.
lazy_read: If set (default), directory contents are only read when
accessed, and only until the needed subdirectory level.
.. note:: This means that the file system size is only updated
at the time the directory contents are read; set this to
`False` only if you are dependent on accurate file system
size in your test
target_path: If given, the target directory, otherwise,
the target directory is the same as `source_path`.
Returns:
the newly created FakeDirectory object.
Raises:
OSError: if the directory does not exist in the real file system.
IOError: if the directory already exists in the fake file system.
]
variable[source_path] assign[=] call[name[self]._path_without_trailing_separators, parameter[name[source_path]]]
if <ast.UnaryOp object at 0x7da18dc067a0> begin[:]
call[name[self].raise_io_error, parameter[name[errno].ENOENT, name[source_path]]]
variable[target_path] assign[=] <ast.BoolOp object at 0x7da18dc04490>
if name[lazy_read] begin[:]
variable[parent_path] assign[=] call[call[name[os].path.split, parameter[name[target_path]]]][constant[0]]
if call[name[self].exists, parameter[name[parent_path]]] begin[:]
variable[parent_dir] assign[=] call[name[self].get_object, parameter[name[parent_path]]]
variable[new_dir] assign[=] call[name[FakeDirectoryFromRealDirectory], parameter[name[source_path], name[self], name[read_only], name[target_path]]]
call[name[parent_dir].add_entry, parameter[name[new_dir]]]
<ast.AugAssign object at 0x7da2047e9e40>
name[new_dir].st_ino assign[=] name[self]._last_ino
return[name[new_dir]]
|
keyword[def] identifier[add_real_directory] ( identifier[self] , identifier[source_path] , identifier[read_only] = keyword[True] , identifier[lazy_read] = keyword[True] ,
identifier[target_path] = keyword[None] ):
literal[string]
identifier[source_path] = identifier[self] . identifier[_path_without_trailing_separators] ( identifier[source_path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[source_path] ):
identifier[self] . identifier[raise_io_error] ( identifier[errno] . identifier[ENOENT] , identifier[source_path] )
identifier[target_path] = identifier[target_path] keyword[or] identifier[source_path]
keyword[if] identifier[lazy_read] :
identifier[parent_path] = identifier[os] . identifier[path] . identifier[split] ( identifier[target_path] )[ literal[int] ]
keyword[if] identifier[self] . identifier[exists] ( identifier[parent_path] ):
identifier[parent_dir] = identifier[self] . identifier[get_object] ( identifier[parent_path] )
keyword[else] :
identifier[parent_dir] = identifier[self] . identifier[create_dir] ( identifier[parent_path] )
identifier[new_dir] = identifier[FakeDirectoryFromRealDirectory] (
identifier[source_path] , identifier[self] , identifier[read_only] , identifier[target_path] )
identifier[parent_dir] . identifier[add_entry] ( identifier[new_dir] )
identifier[self] . identifier[_last_ino] += literal[int]
identifier[new_dir] . identifier[st_ino] = identifier[self] . identifier[_last_ino]
keyword[else] :
identifier[new_dir] = identifier[self] . identifier[create_dir] ( identifier[target_path] )
keyword[for] identifier[base] , identifier[_] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[source_path] ):
identifier[new_base] = identifier[os] . identifier[path] . identifier[join] ( identifier[new_dir] . identifier[path] ,
identifier[os] . identifier[path] . identifier[relpath] ( identifier[base] , identifier[source_path] ))
keyword[for] identifier[fileEntry] keyword[in] identifier[files] :
identifier[self] . identifier[add_real_file] ( identifier[os] . identifier[path] . identifier[join] ( identifier[base] , identifier[fileEntry] ),
identifier[read_only] ,
identifier[os] . identifier[path] . identifier[join] ( identifier[new_base] , identifier[fileEntry] ))
keyword[return] identifier[new_dir]
|
def add_real_directory(self, source_path, read_only=True, lazy_read=True, target_path=None):
"""Create a fake directory corresponding to the real directory at the
specified path. Add entries in the fake directory corresponding to
the entries in the real directory.
Args:
source_path: The path to the existing directory.
read_only: If set, all files under the directory are treated as
read-only, e.g. a write access raises an exception;
otherwise, writing to the files changes the fake files only
            as usual.
lazy_read: If set (default), directory contents are only read when
accessed, and only until the needed subdirectory level.
.. note:: This means that the file system size is only updated
at the time the directory contents are read; set this to
`False` only if you are dependent on accurate file system
size in your test
target_path: If given, the target directory, otherwise,
the target directory is the same as `source_path`.
Returns:
the newly created FakeDirectory object.
Raises:
OSError: if the directory does not exist in the real file system.
IOError: if the directory already exists in the fake file system.
"""
source_path = self._path_without_trailing_separators(source_path)
if not os.path.exists(source_path):
self.raise_io_error(errno.ENOENT, source_path) # depends on [control=['if'], data=[]]
target_path = target_path or source_path
if lazy_read:
parent_path = os.path.split(target_path)[0]
if self.exists(parent_path):
parent_dir = self.get_object(parent_path) # depends on [control=['if'], data=[]]
else:
parent_dir = self.create_dir(parent_path)
new_dir = FakeDirectoryFromRealDirectory(source_path, self, read_only, target_path)
parent_dir.add_entry(new_dir)
self._last_ino += 1
new_dir.st_ino = self._last_ino # depends on [control=['if'], data=[]]
else:
new_dir = self.create_dir(target_path)
for (base, _, files) in os.walk(source_path):
new_base = os.path.join(new_dir.path, os.path.relpath(base, source_path))
for fileEntry in files:
self.add_real_file(os.path.join(base, fileEntry), read_only, os.path.join(new_base, fileEntry)) # depends on [control=['for'], data=['fileEntry']] # depends on [control=['for'], data=[]]
return new_dir
|
def prepare_components(self):
"""Prepare components that are going to be generated based on
user options.
:return: Updated list of components.
        :rtype: list
"""
# Register the components based on user option
# First, tabular report
generated_components = deepcopy(all_default_report_components)
        # Rohmat: I need to define the definitions here; I can't get
        # the definition using the definition helper method.
component_definitions = {
impact_report_pdf_component['key']:
impact_report_pdf_component,
action_checklist_pdf_component['key']:
action_checklist_pdf_component,
analysis_provenance_details_pdf_component['key']:
analysis_provenance_details_pdf_component,
infographic_report['key']: infographic_report
}
duplicated_report_metadata = None
for key, checkbox in list(self.all_checkboxes.items()):
if not checkbox.isChecked():
component = component_definitions[key]
if component in generated_components:
generated_components.remove(component)
continue
if self.is_multi_exposure:
impact_report_metadata = (
standard_multi_exposure_impact_report_metadata_pdf)
else:
impact_report_metadata = (
standard_impact_report_metadata_pdf)
if component in impact_report_metadata['components']:
if not duplicated_report_metadata:
duplicated_report_metadata = deepcopy(
impact_report_metadata)
duplicated_report_metadata['components'].remove(
component)
if impact_report_metadata in generated_components:
generated_components.remove(
impact_report_metadata)
generated_components.append(
duplicated_report_metadata)
# Second, custom and map report
# Get selected template path to use
selected_template_path = None
if self.search_directory_radio.isChecked():
selected_template_path = self.template_combo.itemData(
self.template_combo.currentIndex())
elif self.search_on_disk_radio.isChecked():
selected_template_path = self.template_path.text()
if not exists(selected_template_path):
# noinspection PyCallByClass,PyTypeChecker
QtWidgets.QMessageBox.warning(
self,
tr('InaSAFE'),
tr(
'Please select a valid template before printing. '
'The template you choose does not exist.'))
if map_report in generated_components:
# if self.no_map_radio.isChecked():
# generated_components.remove(map_report)
if self.default_template_radio.isChecked():
# make sure map report is there
generated_components.append(
generated_components.pop(
generated_components.index(map_report)))
elif self.override_template_radio.isChecked():
hazard_type = definition(
self.impact_function.provenance['hazard_keywords'][
'hazard'])
exposure_type = definition(
self.impact_function.provenance['exposure_keywords'][
'exposure'])
generated_components.remove(map_report)
generated_components.append(
update_template_component(
component=map_report,
hazard=hazard_type,
exposure=exposure_type))
elif selected_template_path:
generated_components.remove(map_report)
generated_components.append(
override_component_template(
map_report, selected_template_path))
return generated_components
|
def function[prepare_components, parameter[self]]:
constant[Prepare components that are going to be generated based on
user options.
:return: Updated list of components.
    :rtype: list
]
variable[generated_components] assign[=] call[name[deepcopy], parameter[name[all_default_report_components]]]
variable[component_definitions] assign[=] dictionary[[<ast.Subscript object at 0x7da2041dbee0>, <ast.Subscript object at 0x7da2041da5c0>, <ast.Subscript object at 0x7da2041d8b80>, <ast.Subscript object at 0x7da2041db8e0>], [<ast.Name object at 0x7da2041dbca0>, <ast.Name object at 0x7da2041da350>, <ast.Name object at 0x7da2041dada0>, <ast.Name object at 0x7da2041d8250>]]
variable[duplicated_report_metadata] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da2041d8c10>, <ast.Name object at 0x7da2041d9480>]]] in starred[call[name[list], parameter[call[name[self].all_checkboxes.items, parameter[]]]]] begin[:]
if <ast.UnaryOp object at 0x7da2041d8790> begin[:]
variable[component] assign[=] call[name[component_definitions]][name[key]]
if compare[name[component] in name[generated_components]] begin[:]
call[name[generated_components].remove, parameter[name[component]]]
continue
if name[self].is_multi_exposure begin[:]
variable[impact_report_metadata] assign[=] name[standard_multi_exposure_impact_report_metadata_pdf]
if compare[name[component] in call[name[impact_report_metadata]][constant[components]]] begin[:]
if <ast.UnaryOp object at 0x7da2041d8880> begin[:]
variable[duplicated_report_metadata] assign[=] call[name[deepcopy], parameter[name[impact_report_metadata]]]
call[call[name[duplicated_report_metadata]][constant[components]].remove, parameter[name[component]]]
if compare[name[impact_report_metadata] in name[generated_components]] begin[:]
call[name[generated_components].remove, parameter[name[impact_report_metadata]]]
call[name[generated_components].append, parameter[name[duplicated_report_metadata]]]
variable[selected_template_path] assign[=] constant[None]
if call[name[self].search_directory_radio.isChecked, parameter[]] begin[:]
variable[selected_template_path] assign[=] call[name[self].template_combo.itemData, parameter[call[name[self].template_combo.currentIndex, parameter[]]]]
if compare[name[map_report] in name[generated_components]] begin[:]
if call[name[self].default_template_radio.isChecked, parameter[]] begin[:]
call[name[generated_components].append, parameter[call[name[generated_components].pop, parameter[call[name[generated_components].index, parameter[name[map_report]]]]]]]
return[name[generated_components]]
|
keyword[def] identifier[prepare_components] ( identifier[self] ):
literal[string]
identifier[generated_components] = identifier[deepcopy] ( identifier[all_default_report_components] )
identifier[component_definitions] ={
identifier[impact_report_pdf_component] [ literal[string] ]:
identifier[impact_report_pdf_component] ,
identifier[action_checklist_pdf_component] [ literal[string] ]:
identifier[action_checklist_pdf_component] ,
identifier[analysis_provenance_details_pdf_component] [ literal[string] ]:
identifier[analysis_provenance_details_pdf_component] ,
identifier[infographic_report] [ literal[string] ]: identifier[infographic_report]
}
identifier[duplicated_report_metadata] = keyword[None]
keyword[for] identifier[key] , identifier[checkbox] keyword[in] identifier[list] ( identifier[self] . identifier[all_checkboxes] . identifier[items] ()):
keyword[if] keyword[not] identifier[checkbox] . identifier[isChecked] ():
identifier[component] = identifier[component_definitions] [ identifier[key] ]
keyword[if] identifier[component] keyword[in] identifier[generated_components] :
identifier[generated_components] . identifier[remove] ( identifier[component] )
keyword[continue]
keyword[if] identifier[self] . identifier[is_multi_exposure] :
identifier[impact_report_metadata] =(
identifier[standard_multi_exposure_impact_report_metadata_pdf] )
keyword[else] :
identifier[impact_report_metadata] =(
identifier[standard_impact_report_metadata_pdf] )
keyword[if] identifier[component] keyword[in] identifier[impact_report_metadata] [ literal[string] ]:
keyword[if] keyword[not] identifier[duplicated_report_metadata] :
identifier[duplicated_report_metadata] = identifier[deepcopy] (
identifier[impact_report_metadata] )
identifier[duplicated_report_metadata] [ literal[string] ]. identifier[remove] (
identifier[component] )
keyword[if] identifier[impact_report_metadata] keyword[in] identifier[generated_components] :
identifier[generated_components] . identifier[remove] (
identifier[impact_report_metadata] )
identifier[generated_components] . identifier[append] (
identifier[duplicated_report_metadata] )
identifier[selected_template_path] = keyword[None]
keyword[if] identifier[self] . identifier[search_directory_radio] . identifier[isChecked] ():
identifier[selected_template_path] = identifier[self] . identifier[template_combo] . identifier[itemData] (
identifier[self] . identifier[template_combo] . identifier[currentIndex] ())
keyword[elif] identifier[self] . identifier[search_on_disk_radio] . identifier[isChecked] ():
identifier[selected_template_path] = identifier[self] . identifier[template_path] . identifier[text] ()
keyword[if] keyword[not] identifier[exists] ( identifier[selected_template_path] ):
identifier[QtWidgets] . identifier[QMessageBox] . identifier[warning] (
identifier[self] ,
identifier[tr] ( literal[string] ),
identifier[tr] (
literal[string]
literal[string] ))
keyword[if] identifier[map_report] keyword[in] identifier[generated_components] :
keyword[if] identifier[self] . identifier[default_template_radio] . identifier[isChecked] ():
identifier[generated_components] . identifier[append] (
identifier[generated_components] . identifier[pop] (
identifier[generated_components] . identifier[index] ( identifier[map_report] )))
keyword[elif] identifier[self] . identifier[override_template_radio] . identifier[isChecked] ():
identifier[hazard_type] = identifier[definition] (
identifier[self] . identifier[impact_function] . identifier[provenance] [ literal[string] ][
literal[string] ])
identifier[exposure_type] = identifier[definition] (
identifier[self] . identifier[impact_function] . identifier[provenance] [ literal[string] ][
literal[string] ])
identifier[generated_components] . identifier[remove] ( identifier[map_report] )
identifier[generated_components] . identifier[append] (
identifier[update_template_component] (
identifier[component] = identifier[map_report] ,
identifier[hazard] = identifier[hazard_type] ,
identifier[exposure] = identifier[exposure_type] ))
keyword[elif] identifier[selected_template_path] :
identifier[generated_components] . identifier[remove] ( identifier[map_report] )
identifier[generated_components] . identifier[append] (
identifier[override_component_template] (
identifier[map_report] , identifier[selected_template_path] ))
keyword[return] identifier[generated_components]
|
def prepare_components(self):
"""Prepare components that are going to be generated based on
user options.
:return: Updated list of components.
    :rtype: list
"""
# Register the components based on user option
# First, tabular report
generated_components = deepcopy(all_default_report_components)
    # Rohmat: I need to define the definitions here; I can't get
    # the definition using the definition helper method.
component_definitions = {impact_report_pdf_component['key']: impact_report_pdf_component, action_checklist_pdf_component['key']: action_checklist_pdf_component, analysis_provenance_details_pdf_component['key']: analysis_provenance_details_pdf_component, infographic_report['key']: infographic_report}
duplicated_report_metadata = None
for (key, checkbox) in list(self.all_checkboxes.items()):
if not checkbox.isChecked():
component = component_definitions[key]
if component in generated_components:
generated_components.remove(component)
continue # depends on [control=['if'], data=['component', 'generated_components']]
if self.is_multi_exposure:
impact_report_metadata = standard_multi_exposure_impact_report_metadata_pdf # depends on [control=['if'], data=[]]
else:
impact_report_metadata = standard_impact_report_metadata_pdf
if component in impact_report_metadata['components']:
if not duplicated_report_metadata:
duplicated_report_metadata = deepcopy(impact_report_metadata) # depends on [control=['if'], data=[]]
duplicated_report_metadata['components'].remove(component)
if impact_report_metadata in generated_components:
generated_components.remove(impact_report_metadata)
generated_components.append(duplicated_report_metadata) # depends on [control=['if'], data=['impact_report_metadata', 'generated_components']] # depends on [control=['if'], data=['component']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Second, custom and map report
# Get selected template path to use
selected_template_path = None
if self.search_directory_radio.isChecked():
selected_template_path = self.template_combo.itemData(self.template_combo.currentIndex()) # depends on [control=['if'], data=[]]
elif self.search_on_disk_radio.isChecked():
selected_template_path = self.template_path.text()
if not exists(selected_template_path):
# noinspection PyCallByClass,PyTypeChecker
QtWidgets.QMessageBox.warning(self, tr('InaSAFE'), tr('Please select a valid template before printing. The template you choose does not exist.')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if map_report in generated_components:
# if self.no_map_radio.isChecked():
# generated_components.remove(map_report)
if self.default_template_radio.isChecked():
# make sure map report is there
generated_components.append(generated_components.pop(generated_components.index(map_report))) # depends on [control=['if'], data=[]]
elif self.override_template_radio.isChecked():
hazard_type = definition(self.impact_function.provenance['hazard_keywords']['hazard'])
exposure_type = definition(self.impact_function.provenance['exposure_keywords']['exposure'])
generated_components.remove(map_report)
generated_components.append(update_template_component(component=map_report, hazard=hazard_type, exposure=exposure_type)) # depends on [control=['if'], data=[]]
elif selected_template_path:
generated_components.remove(map_report)
generated_components.append(override_component_template(map_report, selected_template_path)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['map_report', 'generated_components']]
return generated_components
|
def sorted_t(key=None, reverse=False):
"""
Transformation for Sequence.sorted
:param key: key to sort by
:param reverse: reverse or not
:return: transformation
"""
return Transformation(
'sorted',
lambda sequence: sorted(sequence, key=key, reverse=reverse),
None
)
|
def function[sorted_t, parameter[key, reverse]]:
constant[
Transformation for Sequence.sorted
:param key: key to sort by
:param reverse: reverse or not
:return: transformation
]
return[call[name[Transformation], parameter[constant[sorted], <ast.Lambda object at 0x7da204960cd0>, constant[None]]]]
|
keyword[def] identifier[sorted_t] ( identifier[key] = keyword[None] , identifier[reverse] = keyword[False] ):
literal[string]
keyword[return] identifier[Transformation] (
literal[string] ,
keyword[lambda] identifier[sequence] : identifier[sorted] ( identifier[sequence] , identifier[key] = identifier[key] , identifier[reverse] = identifier[reverse] ),
keyword[None]
)
|
def sorted_t(key=None, reverse=False):
"""
Transformation for Sequence.sorted
:param key: key to sort by
:param reverse: reverse or not
:return: transformation
"""
return Transformation('sorted', lambda sequence: sorted(sequence, key=key, reverse=reverse), None)
|
def flush(self):
"""Flushes this instance's cache.
The driver of this instance should call this method every
`flush_interval`.
Returns:
list['ServicecontrolServicesAllocateQuotaRequest']: corresponding
to AllocateQuotaRequests that were pending
"""
if self._cache is None:
return []
with self._cache as c, self._out as out:
c.expire()
now = self._timer()
for item in c.values():
if (not self._in_flush_all) and (not self._should_expire(item)):
if (not item.is_in_flight) and item._op_aggregator is not None:
item.is_in_flight = True
item.last_refresh_timestamp = now
out.append(item.extract_request()) # pylint: disable=no-member
flushed_items = list(out)
out.clear() # pylint: disable=no-member
for req in flushed_items:
assert isinstance(req, sc_messages.ServicecontrolServicesAllocateQuotaRequest)
return flushed_items
|
def function[flush, parameter[self]]:
constant[Flushes this instance's cache.
The driver of this instance should call this method every
`flush_interval`.
Returns:
list['ServicecontrolServicesAllocateQuotaRequest']: corresponding
to AllocateQuotaRequests that were pending
]
if compare[name[self]._cache is constant[None]] begin[:]
return[list[[]]]
with name[self]._cache begin[:]
call[name[c].expire, parameter[]]
variable[now] assign[=] call[name[self]._timer, parameter[]]
for taget[name[item]] in starred[call[name[c].values, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0470910> begin[:]
if <ast.BoolOp object at 0x7da1b0472500> begin[:]
name[item].is_in_flight assign[=] constant[True]
name[item].last_refresh_timestamp assign[=] name[now]
call[name[out].append, parameter[call[name[item].extract_request, parameter[]]]]
variable[flushed_items] assign[=] call[name[list], parameter[name[out]]]
call[name[out].clear, parameter[]]
for taget[name[req]] in starred[name[flushed_items]] begin[:]
assert[call[name[isinstance], parameter[name[req], name[sc_messages].ServicecontrolServicesAllocateQuotaRequest]]]
return[name[flushed_items]]
|
keyword[def] identifier[flush] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_cache] keyword[is] keyword[None] :
keyword[return] []
keyword[with] identifier[self] . identifier[_cache] keyword[as] identifier[c] , identifier[self] . identifier[_out] keyword[as] identifier[out] :
identifier[c] . identifier[expire] ()
identifier[now] = identifier[self] . identifier[_timer] ()
keyword[for] identifier[item] keyword[in] identifier[c] . identifier[values] ():
keyword[if] ( keyword[not] identifier[self] . identifier[_in_flush_all] ) keyword[and] ( keyword[not] identifier[self] . identifier[_should_expire] ( identifier[item] )):
keyword[if] ( keyword[not] identifier[item] . identifier[is_in_flight] ) keyword[and] identifier[item] . identifier[_op_aggregator] keyword[is] keyword[not] keyword[None] :
identifier[item] . identifier[is_in_flight] = keyword[True]
identifier[item] . identifier[last_refresh_timestamp] = identifier[now]
identifier[out] . identifier[append] ( identifier[item] . identifier[extract_request] ())
identifier[flushed_items] = identifier[list] ( identifier[out] )
identifier[out] . identifier[clear] ()
keyword[for] identifier[req] keyword[in] identifier[flushed_items] :
keyword[assert] identifier[isinstance] ( identifier[req] , identifier[sc_messages] . identifier[ServicecontrolServicesAllocateQuotaRequest] )
keyword[return] identifier[flushed_items]
|
def flush(self):
"""Flushes this instance's cache.
The driver of this instance should call this method every
`flush_interval`.
Returns:
list['ServicecontrolServicesAllocateQuotaRequest']: corresponding
to AllocateQuotaRequests that were pending
"""
if self._cache is None:
return [] # depends on [control=['if'], data=[]]
with self._cache as c, self._out as out:
c.expire()
now = self._timer()
for item in c.values():
if not self._in_flush_all and (not self._should_expire(item)):
if not item.is_in_flight and item._op_aggregator is not None:
item.is_in_flight = True
item.last_refresh_timestamp = now
out.append(item.extract_request()) # pylint: disable=no-member # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
flushed_items = list(out)
out.clear() # pylint: disable=no-member
for req in flushed_items:
assert isinstance(req, sc_messages.ServicecontrolServicesAllocateQuotaRequest) # depends on [control=['for'], data=['req']]
return flushed_items # depends on [control=['with'], data=['c']]
|
def differences(scansion: str, candidate: str) -> List[int]:
"""
Given two strings, return a list of index positions where the contents differ.
:param scansion:
:param candidate:
:return:
>>> differences("abc", "abz")
[2]
"""
before = scansion.replace(" ", "")
after = candidate.replace(" ", "")
diffs = []
for idx, tmp in enumerate(before):
if before[idx] != after[idx]:
diffs.append(idx)
return diffs
|
def function[differences, parameter[scansion, candidate]]:
constant[
Given two strings, return a list of index positions where the contents differ.
:param scansion:
:param candidate:
:return:
>>> differences("abc", "abz")
[2]
]
variable[before] assign[=] call[name[scansion].replace, parameter[constant[ ], constant[]]]
variable[after] assign[=] call[name[candidate].replace, parameter[constant[ ], constant[]]]
variable[diffs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18eb54fa0>, <ast.Name object at 0x7da18eb55210>]]] in starred[call[name[enumerate], parameter[name[before]]]] begin[:]
if compare[call[name[before]][name[idx]] not_equal[!=] call[name[after]][name[idx]]] begin[:]
call[name[diffs].append, parameter[name[idx]]]
return[name[diffs]]
|
keyword[def] identifier[differences] ( identifier[scansion] : identifier[str] , identifier[candidate] : identifier[str] )-> identifier[List] [ identifier[int] ]:
literal[string]
identifier[before] = identifier[scansion] . identifier[replace] ( literal[string] , literal[string] )
identifier[after] = identifier[candidate] . identifier[replace] ( literal[string] , literal[string] )
identifier[diffs] =[]
keyword[for] identifier[idx] , identifier[tmp] keyword[in] identifier[enumerate] ( identifier[before] ):
keyword[if] identifier[before] [ identifier[idx] ]!= identifier[after] [ identifier[idx] ]:
identifier[diffs] . identifier[append] ( identifier[idx] )
keyword[return] identifier[diffs]
|
def differences(scansion: str, candidate: str) -> List[int]:
"""
Given two strings, return a list of index positions where the contents differ.
:param scansion:
:param candidate:
:return:
>>> differences("abc", "abz")
[2]
"""
before = scansion.replace(' ', '')
after = candidate.replace(' ', '')
diffs = []
for (idx, tmp) in enumerate(before):
if before[idx] != after[idx]:
diffs.append(idx) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return diffs
|
def make_2d(array, verbose=True):
"""
    tiny tool to expand 1D arrays the way I want
Parameters
----------
array : array-like
verbose : bool, default: True
whether to print warnings
Returns
-------
    np.array with ndim = 2
"""
array = np.asarray(array)
if array.ndim < 2:
msg = 'Expected 2D input data array, but found {}D. '\
'Expanding to 2D.'.format(array.ndim)
if verbose:
warnings.warn(msg)
array = np.atleast_1d(array)[:,None]
return array
|
def function[make_2d, parameter[array, verbose]]:
constant[
    tiny tool to expand 1D arrays the way I want
Parameters
----------
array : array-like
verbose : bool, default: True
whether to print warnings
Returns
-------
    np.array with ndim = 2
]
variable[array] assign[=] call[name[np].asarray, parameter[name[array]]]
if compare[name[array].ndim less[<] constant[2]] begin[:]
variable[msg] assign[=] call[constant[Expected 2D input data array, but found {}D. Expanding to 2D.].format, parameter[name[array].ndim]]
if name[verbose] begin[:]
call[name[warnings].warn, parameter[name[msg]]]
variable[array] assign[=] call[call[name[np].atleast_1d, parameter[name[array]]]][tuple[[<ast.Slice object at 0x7da1b26ac5e0>, <ast.Constant object at 0x7da1b26ad450>]]]
return[name[array]]
|
keyword[def] identifier[make_2d] ( identifier[array] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[array] = identifier[np] . identifier[asarray] ( identifier[array] )
keyword[if] identifier[array] . identifier[ndim] < literal[int] :
identifier[msg] = literal[string] literal[string] . identifier[format] ( identifier[array] . identifier[ndim] )
keyword[if] identifier[verbose] :
identifier[warnings] . identifier[warn] ( identifier[msg] )
identifier[array] = identifier[np] . identifier[atleast_1d] ( identifier[array] )[:, keyword[None] ]
keyword[return] identifier[array]
|
def make_2d(array, verbose=True):
"""
    tiny tool to expand 1D arrays the way I want
Parameters
----------
array : array-like
verbose : bool, default: True
whether to print warnings
Returns
-------
    np.array with ndim = 2
"""
array = np.asarray(array)
if array.ndim < 2:
msg = 'Expected 2D input data array, but found {}D. Expanding to 2D.'.format(array.ndim)
if verbose:
warnings.warn(msg) # depends on [control=['if'], data=[]]
array = np.atleast_1d(array)[:, None] # depends on [control=['if'], data=[]]
return array
|
def edit(self, billing_email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, description=github.GithubObject.NotSet, email=github.GithubObject.NotSet, location=github.GithubObject.NotSet, name=github.GithubObject.NotSet):
"""
:calls: `PATCH /orgs/:org <http://developer.github.com/v3/orgs>`_
:param billing_email: string
:param blog: string
:param company: string
:param description: string
:param email: string
:param location: string
:param name: string
:rtype: None
"""
assert billing_email is github.GithubObject.NotSet or isinstance(billing_email, (str, unicode)), billing_email
assert blog is github.GithubObject.NotSet or isinstance(blog, (str, unicode)), blog
assert company is github.GithubObject.NotSet or isinstance(company, (str, unicode)), company
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert email is github.GithubObject.NotSet or isinstance(email, (str, unicode)), email
assert location is github.GithubObject.NotSet or isinstance(location, (str, unicode)), location
assert name is github.GithubObject.NotSet or isinstance(name, (str, unicode)), name
post_parameters = dict()
if billing_email is not github.GithubObject.NotSet:
post_parameters["billing_email"] = billing_email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
|
def function[edit, parameter[self, billing_email, blog, company, description, email, location, name]]:
constant[
:calls: `PATCH /orgs/:org <http://developer.github.com/v3/orgs>`_
:param billing_email: string
:param blog: string
:param company: string
:param description: string
:param email: string
:param location: string
:param name: string
:rtype: None
]
assert[<ast.BoolOp object at 0x7da1b21a0ac0>]
assert[<ast.BoolOp object at 0x7da1b21a07f0>]
assert[<ast.BoolOp object at 0x7da1b21a0670>]
assert[<ast.BoolOp object at 0x7da1b21a3c10>]
assert[<ast.BoolOp object at 0x7da18dc98070>]
assert[<ast.BoolOp object at 0x7da18dc988b0>]
assert[<ast.BoolOp object at 0x7da1b21a3580>]
variable[post_parameters] assign[=] call[name[dict], parameter[]]
if compare[name[billing_email] is_not name[github].GithubObject.NotSet] begin[:]
call[name[post_parameters]][constant[billing_email]] assign[=] name[billing_email]
if compare[name[blog] is_not name[github].GithubObject.NotSet] begin[:]
call[name[post_parameters]][constant[blog]] assign[=] name[blog]
if compare[name[company] is_not name[github].GithubObject.NotSet] begin[:]
call[name[post_parameters]][constant[company]] assign[=] name[company]
if compare[name[description] is_not name[github].GithubObject.NotSet] begin[:]
call[name[post_parameters]][constant[description]] assign[=] name[description]
if compare[name[email] is_not name[github].GithubObject.NotSet] begin[:]
call[name[post_parameters]][constant[email]] assign[=] name[email]
if compare[name[location] is_not name[github].GithubObject.NotSet] begin[:]
call[name[post_parameters]][constant[location]] assign[=] name[location]
if compare[name[name] is_not name[github].GithubObject.NotSet] begin[:]
call[name[post_parameters]][constant[name]] assign[=] name[name]
<ast.Tuple object at 0x7da18f8104c0> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[PATCH], name[self].url]]
call[name[self]._useAttributes, parameter[name[data]]]
|
keyword[def] identifier[edit] ( identifier[self] , identifier[billing_email] = identifier[github] . identifier[GithubObject] . identifier[NotSet] , identifier[blog] = identifier[github] . identifier[GithubObject] . identifier[NotSet] , identifier[company] = identifier[github] . identifier[GithubObject] . identifier[NotSet] , identifier[description] = identifier[github] . identifier[GithubObject] . identifier[NotSet] , identifier[email] = identifier[github] . identifier[GithubObject] . identifier[NotSet] , identifier[location] = identifier[github] . identifier[GithubObject] . identifier[NotSet] , identifier[name] = identifier[github] . identifier[GithubObject] . identifier[NotSet] ):
literal[string]
keyword[assert] identifier[billing_email] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[billing_email] ,( identifier[str] , identifier[unicode] )), identifier[billing_email]
keyword[assert] identifier[blog] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[blog] ,( identifier[str] , identifier[unicode] )), identifier[blog]
keyword[assert] identifier[company] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[company] ,( identifier[str] , identifier[unicode] )), identifier[company]
keyword[assert] identifier[description] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[description] ,( identifier[str] , identifier[unicode] )), identifier[description]
keyword[assert] identifier[email] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[email] ,( identifier[str] , identifier[unicode] )), identifier[email]
keyword[assert] identifier[location] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[location] ,( identifier[str] , identifier[unicode] )), identifier[location]
keyword[assert] identifier[name] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[name] ,( identifier[str] , identifier[unicode] )), identifier[name]
identifier[post_parameters] = identifier[dict] ()
keyword[if] identifier[billing_email] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] :
identifier[post_parameters] [ literal[string] ]= identifier[billing_email]
keyword[if] identifier[blog] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] :
identifier[post_parameters] [ literal[string] ]= identifier[blog]
keyword[if] identifier[company] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] :
identifier[post_parameters] [ literal[string] ]= identifier[company]
keyword[if] identifier[description] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] :
identifier[post_parameters] [ literal[string] ]= identifier[description]
keyword[if] identifier[email] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] :
identifier[post_parameters] [ literal[string] ]= identifier[email]
keyword[if] identifier[location] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] :
identifier[post_parameters] [ literal[string] ]= identifier[location]
keyword[if] identifier[name] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] :
identifier[post_parameters] [ literal[string] ]= identifier[name]
identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] (
literal[string] ,
identifier[self] . identifier[url] ,
identifier[input] = identifier[post_parameters]
)
identifier[self] . identifier[_useAttributes] ( identifier[data] )
|
def edit(self, billing_email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, description=github.GithubObject.NotSet, email=github.GithubObject.NotSet, location=github.GithubObject.NotSet, name=github.GithubObject.NotSet):
"""
:calls: `PATCH /orgs/:org <http://developer.github.com/v3/orgs>`_
:param billing_email: string
:param blog: string
:param company: string
:param description: string
:param email: string
:param location: string
:param name: string
:rtype: None
"""
assert billing_email is github.GithubObject.NotSet or isinstance(billing_email, (str, unicode)), billing_email
assert blog is github.GithubObject.NotSet or isinstance(blog, (str, unicode)), blog
assert company is github.GithubObject.NotSet or isinstance(company, (str, unicode)), company
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert email is github.GithubObject.NotSet or isinstance(email, (str, unicode)), email
assert location is github.GithubObject.NotSet or isinstance(location, (str, unicode)), location
assert name is github.GithubObject.NotSet or isinstance(name, (str, unicode)), name
post_parameters = dict()
if billing_email is not github.GithubObject.NotSet:
post_parameters['billing_email'] = billing_email # depends on [control=['if'], data=['billing_email']]
if blog is not github.GithubObject.NotSet:
post_parameters['blog'] = blog # depends on [control=['if'], data=['blog']]
if company is not github.GithubObject.NotSet:
post_parameters['company'] = company # depends on [control=['if'], data=['company']]
if description is not github.GithubObject.NotSet:
post_parameters['description'] = description # depends on [control=['if'], data=['description']]
if email is not github.GithubObject.NotSet:
post_parameters['email'] = email # depends on [control=['if'], data=['email']]
if location is not github.GithubObject.NotSet:
post_parameters['location'] = location # depends on [control=['if'], data=['location']]
if name is not github.GithubObject.NotSet:
post_parameters['name'] = name # depends on [control=['if'], data=['name']]
(headers, data) = self._requester.requestJsonAndCheck('PATCH', self.url, input=post_parameters)
self._useAttributes(data)
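A minimal usage sketch for the edit() method above, assuming an authenticated PyGithub client; the token placeholder and organization name are illustrative:

from github import Github

g = Github("<access-token>")                 # placeholder credential
org = g.get_organization("example-org")      # illustrative organization
# Only the fields passed here end up in the PATCH body; everything else
# stays NotSet and is left unchanged on the server.
org.edit(description="Example description", location="Berlin")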
|
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
self._extract_members(members, path, pwd)
|
def function[extractall, parameter[self, path, members, pwd]]:
constant[Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
]
if compare[name[members] is constant[None]] begin[:]
variable[members] assign[=] call[name[self].namelist, parameter[]]
call[name[self]._extract_members, parameter[name[members], name[path], name[pwd]]]
|
keyword[def] identifier[extractall] ( identifier[self] , identifier[path] = keyword[None] , identifier[members] = keyword[None] , identifier[pwd] = keyword[None] ):
literal[string]
keyword[if] identifier[members] keyword[is] keyword[None] :
identifier[members] = identifier[self] . identifier[namelist] ()
identifier[self] . identifier[_extract_members] ( identifier[members] , identifier[path] , identifier[pwd] )
|
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist() # depends on [control=['if'], data=['members']]
self._extract_members(members, path, pwd)
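This matches the stdlib zipfile extractall() signature; a short usage sketch (archive and member names are illustrative):

import zipfile

with zipfile.ZipFile("archive.zip") as zf:   # illustrative archive name
    # Restrict extraction to a subset of namelist(), as the docstring allows.
    members = [n for n in zf.namelist() if n.endswith(".txt")]
    zf.extractall(path="out", members=members)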
|
def facts_refresh():
'''
    Reload the facts dictionary from the device. Usually only needed if
the device configuration is changed by some other actor.
This function will also refresh the facts stored in the salt grains.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.facts_refresh
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
conn.facts_refresh()
except Exception as exception:
ret['message'] = 'Execution failed due to "{0}"'.format(exception)
ret['out'] = False
return ret
ret['facts'] = __proxy__['junos.get_serialized_facts']()
try:
__salt__['saltutil.sync_grains']()
except Exception as exception:
log.error('Grains could not be updated due to "%s"', exception)
return ret
|
def function[facts_refresh, parameter[]]:
constant[
    Reload the facts dictionary from the device. Usually only needed if
the device configuration is changed by some other actor.
This function will also refresh the facts stored in the salt grains.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.facts_refresh
]
variable[conn] assign[=] call[call[name[__proxy__]][constant[junos.conn]], parameter[]]
variable[ret] assign[=] dictionary[[], []]
call[name[ret]][constant[out]] assign[=] constant[True]
<ast.Try object at 0x7da18f722dd0>
call[name[ret]][constant[facts]] assign[=] call[call[name[__proxy__]][constant[junos.get_serialized_facts]], parameter[]]
<ast.Try object at 0x7da18f722470>
return[name[ret]]
|
keyword[def] identifier[facts_refresh] ():
literal[string]
identifier[conn] = identifier[__proxy__] [ literal[string] ]()
identifier[ret] ={}
identifier[ret] [ literal[string] ]= keyword[True]
keyword[try] :
identifier[conn] . identifier[facts_refresh] ()
keyword[except] identifier[Exception] keyword[as] identifier[exception] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[exception] )
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= identifier[__proxy__] [ literal[string] ]()
keyword[try] :
identifier[__salt__] [ literal[string] ]()
keyword[except] identifier[Exception] keyword[as] identifier[exception] :
identifier[log] . identifier[error] ( literal[string] , identifier[exception] )
keyword[return] identifier[ret]
|
def facts_refresh():
"""
    Reload the facts dictionary from the device. Usually only needed if
the device configuration is changed by some other actor.
This function will also refresh the facts stored in the salt grains.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.facts_refresh
"""
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
conn.facts_refresh() # depends on [control=['try'], data=[]]
except Exception as exception:
ret['message'] = 'Execution failed due to "{0}"'.format(exception)
ret['out'] = False
return ret # depends on [control=['except'], data=['exception']]
ret['facts'] = __proxy__['junos.get_serialized_facts']()
try:
__salt__['saltutil.sync_grains']() # depends on [control=['try'], data=[]]
except Exception as exception:
log.error('Grains could not be updated due to "%s"', exception) # depends on [control=['except'], data=['exception']]
return ret
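A sketch of the two return shapes the function above can produce; the facts content is illustrative, not captured device output:

# Success: 'facts' is filled in by the junos proxy minion.
ok = {'out': True, 'facts': {'hostname': 'vmx1', 'model': 'VMX'}}
# Failure: the device call raised, so only a message is returned.
err = {'out': False, 'message': 'Execution failed due to "timeout"'}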
|
def MERGE(*args):
"""
Wipe repeated dependencies from a list of (Analysis, id, filename) tuples,
supplied as argument. Replace id with the correct filename.
"""
# Get the longest common path
common_prefix = os.path.dirname(os.path.commonprefix([os.path.abspath(a.scripts[-1][1]) for a, _, _ in args]))
if common_prefix[-1] != os.sep:
common_prefix += os.sep
logger.info("Common prefix: %s", common_prefix)
# Adjust dependencies for each Analysis object; the first Analysis in the
# list will include all dependencies.
id_to_path = {}
for _, i, p in args:
id_to_path[i] = p
dependencies = {}
for analysis, _, _ in args:
path = os.path.abspath(analysis.scripts[-1][1]).replace(common_prefix, "", 1)
path = os.path.splitext(path)[0]
if path in id_to_path:
path = id_to_path[path]
set_dependencies(analysis, dependencies, path)
|
def function[MERGE, parameter[]]:
constant[
Wipe repeated dependencies from a list of (Analysis, id, filename) tuples,
supplied as argument. Replace id with the correct filename.
]
variable[common_prefix] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.commonprefix, parameter[<ast.ListComp object at 0x7da18f721510>]]]]
if compare[call[name[common_prefix]][<ast.UnaryOp object at 0x7da1b0ed4850>] not_equal[!=] name[os].sep] begin[:]
<ast.AugAssign object at 0x7da1b0ed5030>
call[name[logger].info, parameter[constant[Common prefix: %s], name[common_prefix]]]
variable[id_to_path] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0ed50f0>, <ast.Name object at 0x7da1b0ed4f40>, <ast.Name object at 0x7da1b0ed4130>]]] in starred[name[args]] begin[:]
call[name[id_to_path]][name[i]] assign[=] name[p]
variable[dependencies] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0ed4bb0>, <ast.Name object at 0x7da2041d8130>, <ast.Name object at 0x7da2041da950>]]] in starred[name[args]] begin[:]
variable[path] assign[=] call[call[name[os].path.abspath, parameter[call[call[name[analysis].scripts][<ast.UnaryOp object at 0x7da2041d8640>]][constant[1]]]].replace, parameter[name[common_prefix], constant[], constant[1]]]
variable[path] assign[=] call[call[name[os].path.splitext, parameter[name[path]]]][constant[0]]
if compare[name[path] in name[id_to_path]] begin[:]
variable[path] assign[=] call[name[id_to_path]][name[path]]
call[name[set_dependencies], parameter[name[analysis], name[dependencies], name[path]]]
|
keyword[def] identifier[MERGE] (* identifier[args] ):
literal[string]
identifier[common_prefix] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[commonprefix] ([ identifier[os] . identifier[path] . identifier[abspath] ( identifier[a] . identifier[scripts] [- literal[int] ][ literal[int] ]) keyword[for] identifier[a] , identifier[_] , identifier[_] keyword[in] identifier[args] ]))
keyword[if] identifier[common_prefix] [- literal[int] ]!= identifier[os] . identifier[sep] :
identifier[common_prefix] += identifier[os] . identifier[sep]
identifier[logger] . identifier[info] ( literal[string] , identifier[common_prefix] )
identifier[id_to_path] ={}
keyword[for] identifier[_] , identifier[i] , identifier[p] keyword[in] identifier[args] :
identifier[id_to_path] [ identifier[i] ]= identifier[p]
identifier[dependencies] ={}
keyword[for] identifier[analysis] , identifier[_] , identifier[_] keyword[in] identifier[args] :
identifier[path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[analysis] . identifier[scripts] [- literal[int] ][ literal[int] ]). identifier[replace] ( identifier[common_prefix] , literal[string] , literal[int] )
identifier[path] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[path] )[ literal[int] ]
keyword[if] identifier[path] keyword[in] identifier[id_to_path] :
identifier[path] = identifier[id_to_path] [ identifier[path] ]
identifier[set_dependencies] ( identifier[analysis] , identifier[dependencies] , identifier[path] )
|
def MERGE(*args):
"""
Wipe repeated dependencies from a list of (Analysis, id, filename) tuples,
supplied as argument. Replace id with the correct filename.
"""
# Get the longest common path
common_prefix = os.path.dirname(os.path.commonprefix([os.path.abspath(a.scripts[-1][1]) for (a, _, _) in args]))
if common_prefix[-1] != os.sep:
common_prefix += os.sep # depends on [control=['if'], data=[]]
logger.info('Common prefix: %s', common_prefix)
# Adjust dependencies for each Analysis object; the first Analysis in the
# list will include all dependencies.
id_to_path = {}
for (_, i, p) in args:
id_to_path[i] = p # depends on [control=['for'], data=[]]
dependencies = {}
for (analysis, _, _) in args:
path = os.path.abspath(analysis.scripts[-1][1]).replace(common_prefix, '', 1)
path = os.path.splitext(path)[0]
if path in id_to_path:
path = id_to_path[path] # depends on [control=['if'], data=['path', 'id_to_path']]
set_dependencies(analysis, dependencies, path) # depends on [control=['for'], data=[]]
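A hedged sketch of how MERGE is typically invoked in a PyInstaller multipackage .spec file, where Analysis is provided by PyInstaller at build time; script names and ids are illustrative:

# Inside a .spec file (not a standalone script):
a = Analysis(['app_one.py'])   # Analysis is injected by PyInstaller
b = Analysis(['app_two.py'])
# Each tuple is (Analysis, id, filename); shared dependencies are kept
# only in the first tuple's Analysis, as described above.
MERGE((a, 'app_one', 'app_one'), (b, 'app_two', 'app_two'))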
|
def _make_pretty_deprecated(deprecated):
"""
Makes the deprecated description pretty and returns a formatted string if `deprecated`
is not an empty string. Otherwise, returns None.
Expected input:
...
Expected output:
**Deprecated:**
...
"""
if deprecated != "":
deprecated = "\n".join(map(lambda n: n[4:], deprecated.split("\n")))
return "**Deprecated:**\n%s\n" % deprecated
|
def function[_make_pretty_deprecated, parameter[deprecated]]:
constant[
Makes the deprecated description pretty and returns a formatted string if `deprecated`
is not an empty string. Otherwise, returns None.
Expected input:
...
Expected output:
**Deprecated:**
...
]
if compare[name[deprecated] not_equal[!=] constant[]] begin[:]
variable[deprecated] assign[=] call[constant[
].join, parameter[call[name[map], parameter[<ast.Lambda object at 0x7da18dc9a0b0>, call[name[deprecated].split, parameter[constant[
]]]]]]]
return[binary_operation[constant[**Deprecated:**
%s
] <ast.Mod object at 0x7da2590d6920> name[deprecated]]]
|
keyword[def] identifier[_make_pretty_deprecated] ( identifier[deprecated] ):
literal[string]
keyword[if] identifier[deprecated] != literal[string] :
identifier[deprecated] = literal[string] . identifier[join] ( identifier[map] ( keyword[lambda] identifier[n] : identifier[n] [ literal[int] :], identifier[deprecated] . identifier[split] ( literal[string] )))
keyword[return] literal[string] % identifier[deprecated]
|
def _make_pretty_deprecated(deprecated):
"""
Makes the deprecated description pretty and returns a formatted string if `deprecated`
is not an empty string. Otherwise, returns None.
Expected input:
...
Expected output:
**Deprecated:**
...
"""
if deprecated != '':
deprecated = '\n'.join(map(lambda n: n[4:], deprecated.split('\n')))
return '**Deprecated:**\n%s\n' % deprecated # depends on [control=['if'], data=['deprecated']]
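A worked example of the transformation, using the function as defined above and assuming the four leading spaces that the n[4:] slice strips:

raw = "    .. deprecated:: 2.3.0\n    Use the new helper instead."
print(_make_pretty_deprecated(raw))
# **Deprecated:**
# .. deprecated:: 2.3.0
# Use the new helper instead.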
|
def add_menu(self, menu):
'''add to the default popup menu'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
self.default_popup.add(menu)
self.map.add_object(mp_slipmap.SlipDefaultPopup(self.default_popup, combine=True))
|
def function[add_menu, parameter[self, menu]]:
constant[add to the default popup menu]
from relative_module[MAVProxy.modules.mavproxy_map] import module[mp_slipmap]
call[name[self].default_popup.add, parameter[name[menu]]]
call[name[self].map.add_object, parameter[call[name[mp_slipmap].SlipDefaultPopup, parameter[name[self].default_popup]]]]
|
keyword[def] identifier[add_menu] ( identifier[self] , identifier[menu] ):
literal[string]
keyword[from] identifier[MAVProxy] . identifier[modules] . identifier[mavproxy_map] keyword[import] identifier[mp_slipmap]
identifier[self] . identifier[default_popup] . identifier[add] ( identifier[menu] )
identifier[self] . identifier[map] . identifier[add_object] ( identifier[mp_slipmap] . identifier[SlipDefaultPopup] ( identifier[self] . identifier[default_popup] , identifier[combine] = keyword[True] ))
|
def add_menu(self, menu):
"""add to the default popup menu"""
from MAVProxy.modules.mavproxy_map import mp_slipmap
self.default_popup.add(menu)
self.map.add_object(mp_slipmap.SlipDefaultPopup(self.default_popup, combine=True))
|
def run(hostname=None, port=None, path=None, loop=None):
"""
    The arguments are not all optional: either a path or a hostname+port pair
    must be specified.
"""
if path:
log.debug("Starting Opentrons server application on {}".format(
path))
hostname, port = None, None
else:
log.debug("Starting Opentrons server application on {}:{}".format(
hostname, port))
path = None
web.run_app(init(loop), host=hostname, port=port, path=path)
|
def function[run, parameter[hostname, port, path, loop]]:
constant[
    The arguments are not all optional: either a path or a hostname+port pair
    must be specified.
]
if name[path] begin[:]
call[name[log].debug, parameter[call[constant[Starting Opentrons server application on {}].format, parameter[name[path]]]]]
<ast.Tuple object at 0x7da1b26ac580> assign[=] tuple[[<ast.Constant object at 0x7da1b26ac730>, <ast.Constant object at 0x7da1b26ae0e0>]]
call[name[web].run_app, parameter[call[name[init], parameter[name[loop]]]]]
|
keyword[def] identifier[run] ( identifier[hostname] = keyword[None] , identifier[port] = keyword[None] , identifier[path] = keyword[None] , identifier[loop] = keyword[None] ):
literal[string]
keyword[if] identifier[path] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] (
identifier[path] ))
identifier[hostname] , identifier[port] = keyword[None] , keyword[None]
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] (
identifier[hostname] , identifier[port] ))
identifier[path] = keyword[None]
identifier[web] . identifier[run_app] ( identifier[init] ( identifier[loop] ), identifier[host] = identifier[hostname] , identifier[port] = identifier[port] , identifier[path] = identifier[path] )
|
def run(hostname=None, port=None, path=None, loop=None):
"""
    The arguments are not all optional: either a path or a hostname+port pair
    must be specified.
"""
if path:
log.debug('Starting Opentrons server application on {}'.format(path))
(hostname, port) = (None, None) # depends on [control=['if'], data=[]]
else:
log.debug('Starting Opentrons server application on {}:{}'.format(hostname, port))
path = None
web.run_app(init(loop), host=hostname, port=port, path=path)
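A usage sketch for run(), in the module where it and init() are defined; aiohttp's web.run_app serves either a TCP host+port or a Unix socket path, never both, which is why the unused pair is nulled out (values are illustrative):

# TCP mode: hostname and port, no socket path (blocks until interrupted).
run(hostname="0.0.0.0", port=31950)
# Unix-socket mode: path only; hostname and port are reset to None.
# run(path="/tmp/opentrons.sock")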
|
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
|
def function[levenshtein, parameter[left, right]]:
constant[Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
]
variable[sc] assign[=] name[SparkContext]._active_spark_context
variable[jc] assign[=] call[name[sc]._jvm.functions.levenshtein, parameter[call[name[_to_java_column], parameter[name[left]]], call[name[_to_java_column], parameter[name[right]]]]]
return[call[name[Column], parameter[name[jc]]]]
|
keyword[def] identifier[levenshtein] ( identifier[left] , identifier[right] ):
literal[string]
identifier[sc] = identifier[SparkContext] . identifier[_active_spark_context]
identifier[jc] = identifier[sc] . identifier[_jvm] . identifier[functions] . identifier[levenshtein] ( identifier[_to_java_column] ( identifier[left] ), identifier[_to_java_column] ( identifier[right] ))
keyword[return] identifier[Column] ( identifier[jc] )
|
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
|
def responseInColor(request, status, headers, prefix='Response', opts=None):
"Prints the response info in color"
code, message = status.split(None, 1)
message = '%s [%s] => Request %s %s %s on pid %d' % (
prefix,
code,
str(request.host),
request.method,
request.path,
os.getpid()
)
    signal = int(code) // 100  # floor-divide so the bucket stays an int on Py3
if signal == 2:
chalk.green(message, opts=opts)
elif signal == 3:
chalk.blue(message, opts=opts)
else:
chalk.red(message, opts=opts)
|
def function[responseInColor, parameter[request, status, headers, prefix, opts]]:
constant[Prints the response info in color]
<ast.Tuple object at 0x7da207f99210> assign[=] call[name[status].split, parameter[constant[None], constant[1]]]
variable[message] assign[=] binary_operation[constant[%s [%s] => Request %s %s %s on pid %d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f9ae60>, <ast.Name object at 0x7da207f98d60>, <ast.Call object at 0x7da207f9a500>, <ast.Attribute object at 0x7da207f98c70>, <ast.Attribute object at 0x7da207f99810>, <ast.Call object at 0x7da207f9bdf0>]]]
    variable[signal] assign[=] binary_operation[call[name[int], parameter[name[code]]] // constant[100]]
if compare[name[signal] equal[==] constant[2]] begin[:]
call[name[chalk].green, parameter[name[message]]]
|
keyword[def] identifier[responseInColor] ( identifier[request] , identifier[status] , identifier[headers] , identifier[prefix] = literal[string] , identifier[opts] = keyword[None] ):
literal[string]
identifier[code] , identifier[message] = identifier[status] . identifier[split] ( keyword[None] , literal[int] )
identifier[message] = literal[string] %(
identifier[prefix] ,
identifier[code] ,
identifier[str] ( identifier[request] . identifier[host] ),
identifier[request] . identifier[method] ,
identifier[request] . identifier[path] ,
identifier[os] . identifier[getpid] ()
)
    identifier[signal] = identifier[int] ( identifier[code] )// literal[int]
keyword[if] identifier[signal] == literal[int] :
identifier[chalk] . identifier[green] ( identifier[message] , identifier[opts] = identifier[opts] )
keyword[elif] identifier[signal] == literal[int] :
identifier[chalk] . identifier[blue] ( identifier[message] , identifier[opts] = identifier[opts] )
keyword[else] :
identifier[chalk] . identifier[red] ( identifier[message] , identifier[opts] = identifier[opts] )
|
def responseInColor(request, status, headers, prefix='Response', opts=None):
"""Prints the response info in color"""
(code, message) = status.split(None, 1)
message = '%s [%s] => Request %s %s %s on pid %d' % (prefix, code, str(request.host), request.method, request.path, os.getpid())
    signal = int(code) // 100
if signal == 2:
chalk.green(message, opts=opts) # depends on [control=['if'], data=[]]
elif signal == 3:
chalk.blue(message, opts=opts) # depends on [control=['if'], data=[]]
else:
chalk.red(message, opts=opts)
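A minimal sketch of calling responseInColor() with a stand-in request object; the real caller passes a Twisted-style request, and this stub supplies only the attributes the function reads (it assumes the chalk and os dependencies used above are importable):

class FakeRequest(object):      # stand-in for a Twisted request
    host = 'example.com'
    method = 'GET'
    path = '/status'

responseInColor(FakeRequest(), '200 OK', headers=None)  # green branch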
|
def parameters(self):
"""
Get the dictionary of parameters (either ra,dec or l,b)
:return: dictionary of parameters
"""
if self._coord_type == 'galactic':
return collections.OrderedDict((('l', self.l), ('b', self.b)))
else:
return collections.OrderedDict((('ra', self.ra), ('dec', self.dec)))
|
def function[parameters, parameter[self]]:
constant[
Get the dictionary of parameters (either ra,dec or l,b)
:return: dictionary of parameters
]
if compare[name[self]._coord_type equal[==] constant[galactic]] begin[:]
return[call[name[collections].OrderedDict, parameter[tuple[[<ast.Tuple object at 0x7da18bcc82b0>, <ast.Tuple object at 0x7da18bcc9840>]]]]]
|
keyword[def] identifier[parameters] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_coord_type] == literal[string] :
keyword[return] identifier[collections] . identifier[OrderedDict] ((( literal[string] , identifier[self] . identifier[l] ),( literal[string] , identifier[self] . identifier[b] )))
keyword[else] :
keyword[return] identifier[collections] . identifier[OrderedDict] ((( literal[string] , identifier[self] . identifier[ra] ),( literal[string] , identifier[self] . identifier[dec] )))
|
def parameters(self):
"""
Get the dictionary of parameters (either ra,dec or l,b)
:return: dictionary of parameters
"""
if self._coord_type == 'galactic':
return collections.OrderedDict((('l', self.l), ('b', self.b))) # depends on [control=['if'], data=[]]
else:
return collections.OrderedDict((('ra', self.ra), ('dec', self.dec)))
|
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
|
def function[do_install, parameter[ctx, verbose, fake]]:
constant[Installs legit git aliases.]
call[name[click].echo, parameter[constant[The following git aliases will be installed:
]]]
variable[aliases] assign[=] call[name[cli].list_commands, parameter[name[ctx]]]
call[name[output_aliases], parameter[name[aliases]]]
if call[name[click].confirm, parameter[call[constant[
{}Install aliases above?].format, parameter[<ast.IfExp object at 0x7da1b1950850>]]]] begin[:]
for taget[name[alias]] in starred[name[aliases]] begin[:]
variable[cmd] assign[=] binary_operation[constant[!legit ] + name[alias]]
variable[system_command] assign[=] call[constant[git config --global --replace-all alias.{0} "{1}"].format, parameter[name[alias], name[cmd]]]
call[name[verbose_echo], parameter[name[system_command], name[verbose], name[fake]]]
if <ast.UnaryOp object at 0x7da1b1950fa0> begin[:]
call[name[os].system, parameter[name[system_command]]]
if <ast.UnaryOp object at 0x7da1b1953a30> begin[:]
call[name[click].echo, parameter[constant[
Aliases installed.]]]
|
keyword[def] identifier[do_install] ( identifier[ctx] , identifier[verbose] , identifier[fake] ):
literal[string]
identifier[click] . identifier[echo] ( literal[string] )
identifier[aliases] = identifier[cli] . identifier[list_commands] ( identifier[ctx] )
identifier[output_aliases] ( identifier[aliases] )
keyword[if] identifier[click] . identifier[confirm] ( literal[string] . identifier[format] ( literal[string] keyword[if] identifier[fake] keyword[else] literal[string] ), identifier[default] = identifier[fake] ):
keyword[for] identifier[alias] keyword[in] identifier[aliases] :
identifier[cmd] = literal[string] + identifier[alias]
identifier[system_command] = literal[string] . identifier[format] ( identifier[alias] , identifier[cmd] )
identifier[verbose_echo] ( identifier[system_command] , identifier[verbose] , identifier[fake] )
keyword[if] keyword[not] identifier[fake] :
identifier[os] . identifier[system] ( identifier[system_command] )
keyword[if] keyword[not] identifier[fake] :
identifier[click] . identifier[echo] ( literal[string] )
keyword[else] :
identifier[click] . identifier[echo] ( literal[string] )
|
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['alias']]
if not fake:
click.echo('\nAliases installed.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
click.echo('\nAliases will not be installed.')
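For reference, each iteration of the loop above shells out a command of this shape; the alias name is illustrative:

alias = 'switch'                # illustrative alias name
cmd = '!legit ' + alias
print('git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd))
# git config --global --replace-all alias.switch "!legit switch"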
|
def _format_exitcodes(exitcodes):
"""Format a list of exit code with names of the signals if possible"""
str_exitcodes = ["{}({})".format(_get_exitcode_name(e), e)
for e in exitcodes if e is not None]
return "{" + ", ".join(str_exitcodes) + "}"
|
def function[_format_exitcodes, parameter[exitcodes]]:
    constant[Format a list of exit codes with the names of the signals where possible]
variable[str_exitcodes] assign[=] <ast.ListComp object at 0x7da1b05bf400>
return[binary_operation[binary_operation[constant[{] + call[constant[, ].join, parameter[name[str_exitcodes]]]] + constant[}]]]
|
keyword[def] identifier[_format_exitcodes] ( identifier[exitcodes] ):
literal[string]
identifier[str_exitcodes] =[ literal[string] . identifier[format] ( identifier[_get_exitcode_name] ( identifier[e] ), identifier[e] )
keyword[for] identifier[e] keyword[in] identifier[exitcodes] keyword[if] identifier[e] keyword[is] keyword[not] keyword[None] ]
keyword[return] literal[string] + literal[string] . identifier[join] ( identifier[str_exitcodes] )+ literal[string]
|
def _format_exitcodes(exitcodes):
"""Format a list of exit code with names of the signals if possible"""
str_exitcodes = ['{}({})'.format(_get_exitcode_name(e), e) for e in exitcodes if e is not None]
return '{' + ', '.join(str_exitcodes) + '}'
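_get_exitcode_name is not shown in this excerpt; a hedged sketch with a stand-in that maps negative codes to POSIX signal names, plus a sample call:

import signal

def _get_exitcode_name(e):      # stand-in for the helper not shown here
    if e < 0:                   # negative exit codes encode signals
        try:
            return signal.Signals(-e).name
        except ValueError:
            return 'UNKNOWN'
    return 'EXIT'

print(_format_exitcodes([0, -9, None]))   # None entries are filtered out
# {EXIT(0), SIGKILL(-9)}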
|
def email_url_config(cls, url, backend=None):
"""Parses an email URL."""
config = {}
url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
# Remove query strings
path = url.path[1:]
path = unquote_plus(path.split('?', 2)[0])
# Update with environment configuration
config.update({
'EMAIL_FILE_PATH': path,
'EMAIL_HOST_USER': _cast_urlstr(url.username),
'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password),
'EMAIL_HOST': url.hostname,
'EMAIL_PORT': _cast_int(url.port),
})
if backend:
config['EMAIL_BACKEND'] = backend
elif url.scheme not in cls.EMAIL_SCHEMES:
raise ImproperlyConfigured('Invalid email schema %s' % url.scheme)
elif url.scheme in cls.EMAIL_SCHEMES:
config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]
if url.scheme in ('smtps', 'smtp+tls'):
config['EMAIL_USE_TLS'] = True
elif url.scheme == 'smtp+ssl':
config['EMAIL_USE_SSL'] = True
if url.query:
config_options = {}
for k, v in parse_qs(url.query).items():
opt = {k.upper(): _cast_int(v[0])}
if k.upper() in cls._EMAIL_BASE_OPTIONS:
config.update(opt)
else:
config_options.update(opt)
config['OPTIONS'] = config_options
return config
|
def function[email_url_config, parameter[cls, url, backend]]:
constant[Parses an email URL.]
variable[config] assign[=] dictionary[[], []]
variable[url] assign[=] <ast.IfExp object at 0x7da1b22bbbb0>
variable[path] assign[=] call[name[url].path][<ast.Slice object at 0x7da1b22b95a0>]
variable[path] assign[=] call[name[unquote_plus], parameter[call[call[name[path].split, parameter[constant[?], constant[2]]]][constant[0]]]]
call[name[config].update, parameter[dictionary[[<ast.Constant object at 0x7da1b22b9720>, <ast.Constant object at 0x7da1b22bb310>, <ast.Constant object at 0x7da1b22bb3d0>, <ast.Constant object at 0x7da1b22b8340>, <ast.Constant object at 0x7da1b22b9510>], [<ast.Name object at 0x7da1b22bb820>, <ast.Call object at 0x7da1b22b9630>, <ast.Call object at 0x7da1b22b9990>, <ast.Attribute object at 0x7da1b22b82b0>, <ast.Call object at 0x7da1b22baf50>]]]]
if name[backend] begin[:]
call[name[config]][constant[EMAIL_BACKEND]] assign[=] name[backend]
if compare[name[url].scheme in tuple[[<ast.Constant object at 0x7da1b22b9d80>, <ast.Constant object at 0x7da1b22ba860>]]] begin[:]
call[name[config]][constant[EMAIL_USE_TLS]] assign[=] constant[True]
if name[url].query begin[:]
variable[config_options] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b22bbc40>, <ast.Name object at 0x7da1b22b84c0>]]] in starred[call[call[name[parse_qs], parameter[name[url].query]].items, parameter[]]] begin[:]
variable[opt] assign[=] dictionary[[<ast.Call object at 0x7da1b22b95d0>], [<ast.Call object at 0x7da1b22bae90>]]
if compare[call[name[k].upper, parameter[]] in name[cls]._EMAIL_BASE_OPTIONS] begin[:]
call[name[config].update, parameter[name[opt]]]
call[name[config]][constant[OPTIONS]] assign[=] name[config_options]
return[name[config]]
|
keyword[def] identifier[email_url_config] ( identifier[cls] , identifier[url] , identifier[backend] = keyword[None] ):
literal[string]
identifier[config] ={}
identifier[url] = identifier[urlparse] ( identifier[url] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[url] , identifier[cls] . identifier[URL_CLASS] ) keyword[else] identifier[url]
identifier[path] = identifier[url] . identifier[path] [ literal[int] :]
identifier[path] = identifier[unquote_plus] ( identifier[path] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ])
identifier[config] . identifier[update] ({
literal[string] : identifier[path] ,
literal[string] : identifier[_cast_urlstr] ( identifier[url] . identifier[username] ),
literal[string] : identifier[_cast_urlstr] ( identifier[url] . identifier[password] ),
literal[string] : identifier[url] . identifier[hostname] ,
literal[string] : identifier[_cast_int] ( identifier[url] . identifier[port] ),
})
keyword[if] identifier[backend] :
identifier[config] [ literal[string] ]= identifier[backend]
keyword[elif] identifier[url] . identifier[scheme] keyword[not] keyword[in] identifier[cls] . identifier[EMAIL_SCHEMES] :
keyword[raise] identifier[ImproperlyConfigured] ( literal[string] % identifier[url] . identifier[scheme] )
keyword[elif] identifier[url] . identifier[scheme] keyword[in] identifier[cls] . identifier[EMAIL_SCHEMES] :
identifier[config] [ literal[string] ]= identifier[cls] . identifier[EMAIL_SCHEMES] [ identifier[url] . identifier[scheme] ]
keyword[if] identifier[url] . identifier[scheme] keyword[in] ( literal[string] , literal[string] ):
identifier[config] [ literal[string] ]= keyword[True]
keyword[elif] identifier[url] . identifier[scheme] == literal[string] :
identifier[config] [ literal[string] ]= keyword[True]
keyword[if] identifier[url] . identifier[query] :
identifier[config_options] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[parse_qs] ( identifier[url] . identifier[query] ). identifier[items] ():
identifier[opt] ={ identifier[k] . identifier[upper] (): identifier[_cast_int] ( identifier[v] [ literal[int] ])}
keyword[if] identifier[k] . identifier[upper] () keyword[in] identifier[cls] . identifier[_EMAIL_BASE_OPTIONS] :
identifier[config] . identifier[update] ( identifier[opt] )
keyword[else] :
identifier[config_options] . identifier[update] ( identifier[opt] )
identifier[config] [ literal[string] ]= identifier[config_options]
keyword[return] identifier[config]
|
def email_url_config(cls, url, backend=None):
"""Parses an email URL."""
config = {}
url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
# Remove query strings
path = url.path[1:]
path = unquote_plus(path.split('?', 2)[0])
# Update with environment configuration
config.update({'EMAIL_FILE_PATH': path, 'EMAIL_HOST_USER': _cast_urlstr(url.username), 'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password), 'EMAIL_HOST': url.hostname, 'EMAIL_PORT': _cast_int(url.port)})
if backend:
config['EMAIL_BACKEND'] = backend # depends on [control=['if'], data=[]]
elif url.scheme not in cls.EMAIL_SCHEMES:
raise ImproperlyConfigured('Invalid email schema %s' % url.scheme) # depends on [control=['if'], data=[]]
elif url.scheme in cls.EMAIL_SCHEMES:
config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme] # depends on [control=['if'], data=[]]
if url.scheme in ('smtps', 'smtp+tls'):
config['EMAIL_USE_TLS'] = True # depends on [control=['if'], data=[]]
elif url.scheme == 'smtp+ssl':
config['EMAIL_USE_SSL'] = True # depends on [control=['if'], data=[]]
if url.query:
config_options = {}
for (k, v) in parse_qs(url.query).items():
opt = {k.upper(): _cast_int(v[0])}
if k.upper() in cls._EMAIL_BASE_OPTIONS:
config.update(opt) # depends on [control=['if'], data=[]]
else:
config_options.update(opt) # depends on [control=['for'], data=[]]
config['OPTIONS'] = config_options # depends on [control=['if'], data=[]]
return config
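A usage sketch, assuming this method is exposed as a classmethod on django-environ's Env class; credentials and host are illustrative:

from environ import Env

cfg = Env.email_url_config('smtp+tls://user:secret@mail.example.com:587')
# The scheme selects the backend, and smtp+tls additionally sets:
# cfg['EMAIL_HOST'] == 'mail.example.com', cfg['EMAIL_PORT'] == 587,
# cfg['EMAIL_USE_TLS'] is True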
|
def get_my_hostname(self, split_hostname_on_first_period=False):
"""
Returns a best guess for the hostname registered with OpenStack for this host
"""
hostname = self.init_config.get("os_host") or self.hostname
if split_hostname_on_first_period:
hostname = hostname.split('.')[0]
return hostname
|
def function[get_my_hostname, parameter[self, split_hostname_on_first_period]]:
constant[
Returns a best guess for the hostname registered with OpenStack for this host
]
variable[hostname] assign[=] <ast.BoolOp object at 0x7da18f00ce80>
if name[split_hostname_on_first_period] begin[:]
variable[hostname] assign[=] call[call[name[hostname].split, parameter[constant[.]]]][constant[0]]
return[name[hostname]]
|
keyword[def] identifier[get_my_hostname] ( identifier[self] , identifier[split_hostname_on_first_period] = keyword[False] ):
literal[string]
identifier[hostname] = identifier[self] . identifier[init_config] . identifier[get] ( literal[string] ) keyword[or] identifier[self] . identifier[hostname]
keyword[if] identifier[split_hostname_on_first_period] :
identifier[hostname] = identifier[hostname] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[return] identifier[hostname]
|
def get_my_hostname(self, split_hostname_on_first_period=False):
"""
Returns a best guess for the hostname registered with OpenStack for this host
"""
hostname = self.init_config.get('os_host') or self.hostname
if split_hostname_on_first_period:
hostname = hostname.split('.')[0] # depends on [control=['if'], data=[]]
return hostname
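In isolation, the split flag collapses an FQDN to its first label (hostname illustrative):

print('node1.example.com'.split('.')[0])   # node1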
|
def trim(s, prefix=None, suffix=None, strict=False):
"""Trim a string, removing given prefix or suffix.
:param s: String to trim
:param prefix: Prefix to remove from ``s``
:param suffix: Suffix to remove from ``s``
:param strict: Whether the prefix or suffix must be present in ``s``.
By default, if the infix is not found, function will simply
return the string passed.
Either ``prefix`` or ``suffix`` must be provided, but not both
(which would be ambiguous as to what to remove first).
:raise ValueError: If ``strict`` is True and required prefix or suffix
was not found
Examples::
trim('foobar', prefix='foo') # 'bar'
trim('foobar', suffix='bar') # 'foo'
trim('foobar', prefix='baz', strict=True) # exception
.. versionadded:: 0.0.4
"""
ensure_string(s)
has_prefix = prefix is not None
has_suffix = suffix is not None
if has_prefix == has_suffix:
raise ValueError(
"exactly one of either prefix or suffix must be provided")
if has_prefix:
ensure_string(prefix)
if s.startswith(prefix):
return s[len(prefix):]
elif strict:
raise ValueError(
"string %r does not start with expected prefix %r" % (
s, prefix))
if has_suffix:
ensure_string(suffix)
if s.endswith(suffix):
return s[:-len(suffix)] if suffix else s
elif strict:
raise ValueError(
"string %r does not end with expected suffix %r" % (
s, suffix))
return s
|
def function[trim, parameter[s, prefix, suffix, strict]]:
constant[Trim a string, removing given prefix or suffix.
:param s: String to trim
:param prefix: Prefix to remove from ``s``
:param suffix: Suffix to remove from ``s``
:param strict: Whether the prefix or suffix must be present in ``s``.
By default, if the infix is not found, function will simply
return the string passed.
Either ``prefix`` or ``suffix`` must be provided, but not both
(which would be ambiguous as to what to remove first).
:raise ValueError: If ``strict`` is True and required prefix or suffix
was not found
Examples::
trim('foobar', prefix='foo') # 'bar'
trim('foobar', suffix='bar') # 'foo'
trim('foobar', prefix='baz', strict=True) # exception
.. versionadded:: 0.0.4
]
call[name[ensure_string], parameter[name[s]]]
variable[has_prefix] assign[=] compare[name[prefix] is_not constant[None]]
variable[has_suffix] assign[=] compare[name[suffix] is_not constant[None]]
if compare[name[has_prefix] equal[==] name[has_suffix]] begin[:]
<ast.Raise object at 0x7da1b236bb20>
if name[has_prefix] begin[:]
call[name[ensure_string], parameter[name[prefix]]]
if call[name[s].startswith, parameter[name[prefix]]] begin[:]
return[call[name[s]][<ast.Slice object at 0x7da1b2369870>]]
if name[has_suffix] begin[:]
call[name[ensure_string], parameter[name[suffix]]]
if call[name[s].endswith, parameter[name[suffix]]] begin[:]
return[<ast.IfExp object at 0x7da1b236b190>]
return[name[s]]
|
keyword[def] identifier[trim] ( identifier[s] , identifier[prefix] = keyword[None] , identifier[suffix] = keyword[None] , identifier[strict] = keyword[False] ):
literal[string]
identifier[ensure_string] ( identifier[s] )
identifier[has_prefix] = identifier[prefix] keyword[is] keyword[not] keyword[None]
identifier[has_suffix] = identifier[suffix] keyword[is] keyword[not] keyword[None]
keyword[if] identifier[has_prefix] == identifier[has_suffix] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] identifier[has_prefix] :
identifier[ensure_string] ( identifier[prefix] )
keyword[if] identifier[s] . identifier[startswith] ( identifier[prefix] ):
keyword[return] identifier[s] [ identifier[len] ( identifier[prefix] ):]
keyword[elif] identifier[strict] :
keyword[raise] identifier[ValueError] (
literal[string] %(
identifier[s] , identifier[prefix] ))
keyword[if] identifier[has_suffix] :
identifier[ensure_string] ( identifier[suffix] )
keyword[if] identifier[s] . identifier[endswith] ( identifier[suffix] ):
keyword[return] identifier[s] [:- identifier[len] ( identifier[suffix] )] keyword[if] identifier[suffix] keyword[else] identifier[s]
keyword[elif] identifier[strict] :
keyword[raise] identifier[ValueError] (
literal[string] %(
identifier[s] , identifier[suffix] ))
keyword[return] identifier[s]
|
def trim(s, prefix=None, suffix=None, strict=False):
"""Trim a string, removing given prefix or suffix.
:param s: String to trim
:param prefix: Prefix to remove from ``s``
:param suffix: Suffix to remove from ``s``
:param strict: Whether the prefix or suffix must be present in ``s``.
By default, if the infix is not found, function will simply
return the string passed.
Either ``prefix`` or ``suffix`` must be provided, but not both
(which would be ambiguous as to what to remove first).
:raise ValueError: If ``strict`` is True and required prefix or suffix
was not found
Examples::
trim('foobar', prefix='foo') # 'bar'
trim('foobar', suffix='bar') # 'foo'
trim('foobar', prefix='baz', strict=True) # exception
.. versionadded:: 0.0.4
"""
ensure_string(s)
has_prefix = prefix is not None
has_suffix = suffix is not None
if has_prefix == has_suffix:
raise ValueError('exactly one of either prefix or suffix must be provided') # depends on [control=['if'], data=[]]
if has_prefix:
ensure_string(prefix)
if s.startswith(prefix):
return s[len(prefix):] # depends on [control=['if'], data=[]]
elif strict:
raise ValueError('string %r does not start with expected prefix %r' % (s, prefix)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if has_suffix:
ensure_string(suffix)
if s.endswith(suffix):
return s[:-len(suffix)] if suffix else s # depends on [control=['if'], data=[]]
elif strict:
raise ValueError('string %r does not end with expected suffix %r' % (s, suffix)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return s
|
def _get_resolved_alias_name(self, property_name, original_alias_value, intrinsics_resolver):
"""
    Alias names can be supplied as an intrinsic function. This method tries to extract the alias name from a
    reference to a parameter. If it cannot be completely resolved (i.e., if a complex intrinsic function was used),
    this method raises an exception. If the alias name is just a plain string, it is returned as is.
:param dict or string original_alias_value: Value of Alias property as provided by the customer
:param samtranslator.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Instance of the resolver that
knows how to resolve parameter references
:return string: Alias name
:raises InvalidResourceException: If the value is a complex intrinsic function that cannot be resolved
"""
# Try to resolve.
resolved_alias_name = intrinsics_resolver.resolve_parameter_refs(original_alias_value)
if not isinstance(resolved_alias_name, string_types):
# This is still a dictionary which means we are not able to completely resolve intrinsics
raise InvalidResourceException(self.logical_id,
"'{}' must be a string or a Ref to a template parameter"
.format(property_name))
return resolved_alias_name
|
def function[_get_resolved_alias_name, parameter[self, property_name, original_alias_value, intrinsics_resolver]]:
constant[
    Alias names can be supplied as an intrinsic function. This method tries to extract the alias name from a
    reference to a parameter. If it cannot be completely resolved (i.e., if a complex intrinsic function was used),
    this method raises an exception. If the alias name is just a plain string, it is returned as is.
:param dict or string original_alias_value: Value of Alias property as provided by the customer
:param samtranslator.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Instance of the resolver that
knows how to resolve parameter references
:return string: Alias name
:raises InvalidResourceException: If the value is a complex intrinsic function that cannot be resolved
]
variable[resolved_alias_name] assign[=] call[name[intrinsics_resolver].resolve_parameter_refs, parameter[name[original_alias_value]]]
if <ast.UnaryOp object at 0x7da20c7968c0> begin[:]
<ast.Raise object at 0x7da20c796290>
return[name[resolved_alias_name]]
|
keyword[def] identifier[_get_resolved_alias_name] ( identifier[self] , identifier[property_name] , identifier[original_alias_value] , identifier[intrinsics_resolver] ):
literal[string]
identifier[resolved_alias_name] = identifier[intrinsics_resolver] . identifier[resolve_parameter_refs] ( identifier[original_alias_value] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[resolved_alias_name] , identifier[string_types] ):
keyword[raise] identifier[InvalidResourceException] ( identifier[self] . identifier[logical_id] ,
literal[string]
. identifier[format] ( identifier[property_name] ))
keyword[return] identifier[resolved_alias_name]
|
def _get_resolved_alias_name(self, property_name, original_alias_value, intrinsics_resolver):
"""
    Alias names can be supplied as an intrinsic function. This method tries to extract the alias name from a
    reference to a parameter. If it cannot be completely resolved (i.e., if a complex intrinsic function was used),
    this method raises an exception. If the alias name is just a plain string, it is returned as is.
:param dict or string original_alias_value: Value of Alias property as provided by the customer
:param samtranslator.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Instance of the resolver that
knows how to resolve parameter references
:return string: Alias name
:raises InvalidResourceException: If the value is a complex intrinsic function that cannot be resolved
"""
# Try to resolve.
resolved_alias_name = intrinsics_resolver.resolve_parameter_refs(original_alias_value)
if not isinstance(resolved_alias_name, string_types):
# This is still a dictionary which means we are not able to completely resolve intrinsics
raise InvalidResourceException(self.logical_id, "'{}' must be a string or a Ref to a template parameter".format(property_name)) # depends on [control=['if'], data=[]]
return resolved_alias_name
|
def _get_params(self):
"""
Generate SOAP parameters.
"""
params = {'accountNumber': self._service.accountNumber}
# Include object variables that are in field_order
for key, val in self.__dict__.iteritems():
if key in self.field_order:
# Turn into Unicode
            if isinstance(val, str):
val = val.decode('utf8')
params[key] = val
# Set missing parameters as empty strings
for key in self.field_order:
if key not in params:
params[key] = u''
# Parameter sorting method
def order_keys(k):
if k[0] in self.field_order:
return self.field_order.index(k[0])
return len(self.field_order) + 1
# Sort the ordered dictionary
params = OrderedDict(sorted(params.items(), key=order_keys))
# Add hash to dictionary if present
if hasattr(self, 'hash') and self.hash is not None:
params['hash'] = self.hash
return params
|
def function[_get_params, parameter[self]]:
constant[
Generate SOAP parameters.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b18740a0>], [<ast.Attribute object at 0x7da1b1877b80>]]
for taget[tuple[[<ast.Name object at 0x7da1b1876140>, <ast.Name object at 0x7da1b1874fd0>]]] in starred[call[name[self].__dict__.iteritems, parameter[]]] begin[:]
if compare[name[key] in name[self].field_order] begin[:]
if call[name[isinstance], parameter[name[val], name[str]]] begin[:]
variable[val] assign[=] call[name[val].decode, parameter[constant[utf8]]]
call[name[params]][name[key]] assign[=] name[val]
for taget[name[key]] in starred[name[self].field_order] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[params]] begin[:]
call[name[params]][name[key]] assign[=] constant[]
def function[order_keys, parameter[k]]:
if compare[call[name[k]][constant[0]] in name[self].field_order] begin[:]
return[call[name[self].field_order.index, parameter[call[name[k]][constant[0]]]]]
return[binary_operation[call[name[len], parameter[name[self].field_order]] + constant[1]]]
variable[params] assign[=] call[name[OrderedDict], parameter[call[name[sorted], parameter[call[name[params].items, parameter[]]]]]]
if <ast.BoolOp object at 0x7da1b18749a0> begin[:]
call[name[params]][constant[hash]] assign[=] name[self].hash
return[name[params]]
|
keyword[def] identifier[_get_params] ( identifier[self] ):
literal[string]
identifier[params] ={ literal[string] : identifier[self] . identifier[_service] . identifier[accountNumber] }
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[self] . identifier[__dict__] . identifier[iteritems] ():
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[field_order] :
            keyword[if] identifier[isinstance] ( identifier[val] , identifier[str] ):
identifier[val] = identifier[val] . identifier[decode] ( literal[string] )
identifier[params] [ identifier[key] ]= identifier[val]
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[field_order] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[params] :
identifier[params] [ identifier[key] ]= literal[string]
keyword[def] identifier[order_keys] ( identifier[k] ):
keyword[if] identifier[k] [ literal[int] ] keyword[in] identifier[self] . identifier[field_order] :
keyword[return] identifier[self] . identifier[field_order] . identifier[index] ( identifier[k] [ literal[int] ])
keyword[return] identifier[len] ( identifier[self] . identifier[field_order] )+ literal[int]
identifier[params] = identifier[OrderedDict] ( identifier[sorted] ( identifier[params] . identifier[items] (), identifier[key] = identifier[order_keys] ))
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[hash] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[self] . identifier[hash]
keyword[return] identifier[params]
|
def _get_params(self):
"""
Generate SOAP parameters.
"""
params = {'accountNumber': self._service.accountNumber}
# Include object variables that are in field_order
for (key, val) in self.__dict__.iteritems():
if key in self.field_order:
# Turn into Unicode
if isinstance(val, str):
val = val.decode('utf8') # depends on [control=['if'], data=[]]
params[key] = val # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=[]]
# Set missing parameters as empty strings
for key in self.field_order:
if key not in params:
params[key] = u'' # depends on [control=['if'], data=['key', 'params']] # depends on [control=['for'], data=['key']]
# Parameter sorting method
def order_keys(k):
if k[0] in self.field_order:
return self.field_order.index(k[0]) # depends on [control=['if'], data=[]]
return len(self.field_order) + 1
# Sort the ordered dictionary
params = OrderedDict(sorted(params.items(), key=order_keys))
# Add hash to dictionary if present
if hasattr(self, 'hash') and self.hash is not None:
params['hash'] = self.hash # depends on [control=['if'], data=[]]
return params
|
def cmp_mat(a, b):
"""
    Compares two matrices element-wise by absolute value, returning the first nonzero comparison (negative, zero, or positive)
"""
c = 0
for x, y in zip(a.flat, b.flat):
c = cmp(abs(x), abs(y))
if c != 0:
return c
return c
|
def function[cmp_mat, parameter[a, b]]:
constant[
    Compares two matrices element-wise by absolute value, returning the first nonzero comparison (negative, zero, or positive)
]
variable[c] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da20e955510>, <ast.Name object at 0x7da20e955180>]]] in starred[call[name[zip], parameter[name[a].flat, name[b].flat]]] begin[:]
variable[c] assign[=] call[name[cmp], parameter[call[name[abs], parameter[name[x]]], call[name[abs], parameter[name[y]]]]]
if compare[name[c] not_equal[!=] constant[0]] begin[:]
return[name[c]]
return[name[c]]
|
keyword[def] identifier[cmp_mat] ( identifier[a] , identifier[b] ):
literal[string]
identifier[c] = literal[int]
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[a] . identifier[flat] , identifier[b] . identifier[flat] ):
identifier[c] = identifier[cmp] ( identifier[abs] ( identifier[x] ), identifier[abs] ( identifier[y] ))
keyword[if] identifier[c] != literal[int] :
keyword[return] identifier[c]
keyword[return] identifier[c]
|
def cmp_mat(a, b):
"""
    Compares two matrices element-wise by absolute value, returning the first nonzero comparison (negative, zero, or positive)
"""
c = 0
for (x, y) in zip(a.flat, b.flat):
c = cmp(abs(x), abs(y))
if c != 0:
return c # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=[]]
return c
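cmp() was removed in Python 3; a hedged, self-contained sketch with a shim and a small NumPy example (values illustrative), using cmp_mat as defined above:

import numpy as np

def cmp(a, b):                  # Python 3 shim for the removed builtin
    return (a > b) - (a < b)

m1 = np.array([[1.0, -2.0], [3.0, 4.0]])
m2 = np.array([[1.0,  2.0], [3.0, 5.0]])
# |1|==|1|, |-2|==|2|, |3|==|3|, then |4| < |5| -> first nonzero is -1
print(cmp_mat(m1, m2))          # -1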
|
def main(argv=None):
"""
pyrpo.main: parse commandline options with optparse and run specified
reports
"""
import logging
if argv is None:
argv = sys.argv
prs = get_option_parser()
(opts, args) = prs.parse_args(args=argv)
if not opts.quiet:
_format = None
_format = "%(levelname)s\t%(message)s"
# _format = "%(message)s"
logging.basicConfig(format=_format)
log = logging.getLogger('repos')
if opts.verbose:
log.setLevel(logging.DEBUG)
elif opts.quiet:
log.setLevel(logging.ERROR)
else:
log.setLevel(logging.INFO)
if not opts.scan:
opts.scan = ['.']
if opts.scan:
# if not opts.reports:
# opts.reports = ['pip']
if opts.reports or opts.thg_report:
opts.reports = [s.strip().lower() for s in opts.reports]
if 'thg' in opts.reports:
opts.thg_report = True
opts.reports.remove('thg')
# repos = []
# for _path in opts.scan:
# repos.extend(find_unique_repos(_path))
log.debug("SCANNING PATHS: %s" % opts.scan)
repos = chain(*imap(find_unique_repos, opts.scan))
if opts.reports and opts.thg_report:
repos = list(repos)
# TODO: tee
if opts.reports:
for report in opts.reports:
list(do_repo_report(repos, report=report))
if opts.thg_report:
do_tortoisehg_report(repos, output=sys.stdout)
else:
opts.scan = '.'
list(do_repo_report(
find_unique_repos(opts.scan),
report='sh'))
return 0
|
def function[main, parameter[argv]]:
constant[
pyrpo.main: parse commandline options with optparse and run specified
reports
]
import module[logging]
if compare[name[argv] is constant[None]] begin[:]
variable[argv] assign[=] name[sys].argv
variable[prs] assign[=] call[name[get_option_parser], parameter[]]
<ast.Tuple object at 0x7da1b094a110> assign[=] call[name[prs].parse_args, parameter[]]
if <ast.UnaryOp object at 0x7da1b094ace0> begin[:]
variable[_format] assign[=] constant[None]
variable[_format] assign[=] constant[%(levelname)s %(message)s]
call[name[logging].basicConfig, parameter[]]
variable[log] assign[=] call[name[logging].getLogger, parameter[constant[repos]]]
if name[opts].verbose begin[:]
call[name[log].setLevel, parameter[name[logging].DEBUG]]
if <ast.UnaryOp object at 0x7da1b0949b70> begin[:]
name[opts].scan assign[=] list[[<ast.Constant object at 0x7da1b2345b70>]]
if name[opts].scan begin[:]
if <ast.BoolOp object at 0x7da1b2347bb0> begin[:]
name[opts].reports assign[=] <ast.ListComp object at 0x7da1b2346230>
if compare[constant[thg] in name[opts].reports] begin[:]
name[opts].thg_report assign[=] constant[True]
call[name[opts].reports.remove, parameter[constant[thg]]]
call[name[log].debug, parameter[binary_operation[constant[SCANNING PATHS: %s] <ast.Mod object at 0x7da2590d6920> name[opts].scan]]]
variable[repos] assign[=] call[name[chain], parameter[<ast.Starred object at 0x7da1b23466b0>]]
if <ast.BoolOp object at 0x7da1b2345f90> begin[:]
variable[repos] assign[=] call[name[list], parameter[name[repos]]]
if name[opts].reports begin[:]
for taget[name[report]] in starred[name[opts].reports] begin[:]
call[name[list], parameter[call[name[do_repo_report], parameter[name[repos]]]]]
if name[opts].thg_report begin[:]
call[name[do_tortoisehg_report], parameter[name[repos]]]
return[constant[0]]
|
keyword[def] identifier[main] ( identifier[argv] = keyword[None] ):
literal[string]
keyword[import] identifier[logging]
keyword[if] identifier[argv] keyword[is] keyword[None] :
identifier[argv] = identifier[sys] . identifier[argv]
identifier[prs] = identifier[get_option_parser] ()
( identifier[opts] , identifier[args] )= identifier[prs] . identifier[parse_args] ( identifier[args] = identifier[argv] )
keyword[if] keyword[not] identifier[opts] . identifier[quiet] :
identifier[_format] = keyword[None]
identifier[_format] = literal[string]
identifier[logging] . identifier[basicConfig] ( identifier[format] = identifier[_format] )
identifier[log] = identifier[logging] . identifier[getLogger] ( literal[string] )
keyword[if] identifier[opts] . identifier[verbose] :
identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[DEBUG] )
keyword[elif] identifier[opts] . identifier[quiet] :
identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[ERROR] )
keyword[else] :
identifier[log] . identifier[setLevel] ( identifier[logging] . identifier[INFO] )
keyword[if] keyword[not] identifier[opts] . identifier[scan] :
identifier[opts] . identifier[scan] =[ literal[string] ]
keyword[if] identifier[opts] . identifier[scan] :
keyword[if] identifier[opts] . identifier[reports] keyword[or] identifier[opts] . identifier[thg_report] :
identifier[opts] . identifier[reports] =[ identifier[s] . identifier[strip] (). identifier[lower] () keyword[for] identifier[s] keyword[in] identifier[opts] . identifier[reports] ]
keyword[if] literal[string] keyword[in] identifier[opts] . identifier[reports] :
identifier[opts] . identifier[thg_report] = keyword[True]
identifier[opts] . identifier[reports] . identifier[remove] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] % identifier[opts] . identifier[scan] )
identifier[repos] = identifier[chain] (* identifier[imap] ( identifier[find_unique_repos] , identifier[opts] . identifier[scan] ))
keyword[if] identifier[opts] . identifier[reports] keyword[and] identifier[opts] . identifier[thg_report] :
identifier[repos] = identifier[list] ( identifier[repos] )
keyword[if] identifier[opts] . identifier[reports] :
keyword[for] identifier[report] keyword[in] identifier[opts] . identifier[reports] :
identifier[list] ( identifier[do_repo_report] ( identifier[repos] , identifier[report] = identifier[report] ))
keyword[if] identifier[opts] . identifier[thg_report] :
identifier[do_tortoisehg_report] ( identifier[repos] , identifier[output] = identifier[sys] . identifier[stdout] )
keyword[else] :
identifier[opts] . identifier[scan] = literal[string]
identifier[list] ( identifier[do_repo_report] (
identifier[find_unique_repos] ( identifier[opts] . identifier[scan] ),
identifier[report] = literal[string] ))
keyword[return] literal[int]
|
def main(argv=None):
"""
pyrpo.main: parse commandline options with optparse and run specified
reports
"""
import logging
if argv is None:
argv = sys.argv # depends on [control=['if'], data=['argv']]
prs = get_option_parser()
(opts, args) = prs.parse_args(args=argv)
if not opts.quiet:
_format = None
_format = '%(levelname)s\t%(message)s'
# _format = "%(message)s"
logging.basicConfig(format=_format) # depends on [control=['if'], data=[]]
log = logging.getLogger('repos')
if opts.verbose:
log.setLevel(logging.DEBUG) # depends on [control=['if'], data=[]]
elif opts.quiet:
log.setLevel(logging.ERROR) # depends on [control=['if'], data=[]]
else:
log.setLevel(logging.INFO)
if not opts.scan:
opts.scan = ['.'] # depends on [control=['if'], data=[]]
if opts.scan:
# if not opts.reports:
# opts.reports = ['pip']
if opts.reports or opts.thg_report:
opts.reports = [s.strip().lower() for s in opts.reports]
if 'thg' in opts.reports:
opts.thg_report = True
opts.reports.remove('thg') # depends on [control=['if'], data=[]]
# repos = []
# for _path in opts.scan:
# repos.extend(find_unique_repos(_path))
log.debug('SCANNING PATHS: %s' % opts.scan)
repos = chain(*imap(find_unique_repos, opts.scan))
if opts.reports and opts.thg_report:
repos = list(repos) # depends on [control=['if'], data=[]]
# TODO: tee
if opts.reports:
for report in opts.reports:
list(do_repo_report(repos, report=report)) # depends on [control=['for'], data=['report']] # depends on [control=['if'], data=[]]
if opts.thg_report:
do_tortoisehg_report(repos, output=sys.stdout) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
opts.scan = '.'
list(do_repo_report(find_unique_repos(opts.scan), report='sh')) # depends on [control=['if'], data=[]]
return 0
|
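Worth noting about the `main` above: optparse's parse_args expects an argument list without the program name, yet the default here is the whole of sys.argv, so argv[0] rides along as a positional argument. A hypothetical entry point that sidesteps this (the __main__ guard is not part of the source):

if __name__ == '__main__':
    import sys
    sys.exit(main(argv=sys.argv[1:]))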
def check_dependee_exists(self, depender, dependee, dependee_id):
"""Checks whether a depended-on module is available.
"""
shutit_global.shutit_global_object.yield_to_draw()
# If the module id isn't there, there's a problem.
if dependee is None:
return 'module: \n\n' + dependee_id + '\n\nnot found in paths: ' + str(self.host['shutit_module_path']) + ' but needed for ' + depender.module_id + '\nCheck your --shutit_module_path setting and ensure that all modules configured to be built are in that path setting, eg "--shutit_module_path /path/to/other/module/:."\n\nAlso check that the module is configured to be built with the correct module id in that module\'s configs/build.cnf file.\n\nSee also help.'
return ''
|
def function[check_dependee_exists, parameter[self, depender, dependee, dependee_id]]:
constant[Checks whether a depended-on module is available.
]
call[name[shutit_global].shutit_global_object.yield_to_draw, parameter[]]
if compare[name[dependee] is constant[None]] begin[:]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[module:
] + name[dependee_id]] + constant[
not found in paths: ]] + call[name[str], parameter[call[name[self].host][constant[shutit_module_path]]]]] + constant[ but needed for ]] + name[depender].module_id] + constant[
Check your --shutit_module_path setting and ensure that all modules configured to be built are in that path setting, eg "--shutit_module_path /path/to/other/module/:."
Also check that the module is configured to be built with the correct module id in that module's configs/build.cnf file.
See also help.]]]
return[constant[]]
|
keyword[def] identifier[check_dependee_exists] ( identifier[self] , identifier[depender] , identifier[dependee] , identifier[dependee_id] ):
literal[string]
identifier[shutit_global] . identifier[shutit_global_object] . identifier[yield_to_draw] ()
keyword[if] identifier[dependee] keyword[is] keyword[None] :
keyword[return] literal[string] + identifier[dependee_id] + literal[string] + identifier[str] ( identifier[self] . identifier[host] [ literal[string] ])+ literal[string] + identifier[depender] . identifier[module_id] + literal[string]
keyword[return] literal[string]
|
def check_dependee_exists(self, depender, dependee, dependee_id):
"""Checks whether a depended-on module is available.
"""
shutit_global.shutit_global_object.yield_to_draw() # If the module id isn't there, there's a problem.
if dependee is None:
return 'module: \n\n' + dependee_id + '\n\nnot found in paths: ' + str(self.host['shutit_module_path']) + ' but needed for ' + depender.module_id + '\nCheck your --shutit_module_path setting and ensure that all modules configured to be built are in that path setting, eg "--shutit_module_path /path/to/other/module/:."\n\nAlso check that the module is configured to be built with the correct module id in that module\'s configs/build.cnf file.\n\nSee also help.' # depends on [control=['if'], data=[]]
return ''
|
def _build_arguments(self):
"""
build arguments for command.
"""
self._parser.add_argument(
'--attach',
type=bool,
required=False,
default=False,
help="Attach to containers output?"
)
self._parser.add_argument(
'--clean',
type=bool,
required=False,
default=False,
help="clean up everything that was created by freight forwarder at the end."
)
self._parser.add_argument(
'--configs',
type=bool,
required=False,
default=False,
help="Would you like to inject configuration files?"
)
self._parser.add_argument(
'-e', '--env',
required=False,
type=str,
action='append',
default=None,
help='environment variables to create in the container at run time.'
)
self._parser.add_argument(
'--test',
type=bool,
required=False,
default=False,
help="Run tests."
)
self._parser.add_argument(
'--use-cache',
required=False,
action='store_true',
default=False,
help='Allow build to use cached image layers.'
)
|
def function[_build_arguments, parameter[self]]:
constant[
build arguments for command.
]
call[name[self]._parser.add_argument, parameter[constant[--attach]]]
call[name[self]._parser.add_argument, parameter[constant[--clean]]]
call[name[self]._parser.add_argument, parameter[constant[--configs]]]
call[name[self]._parser.add_argument, parameter[constant[-e], constant[--env]]]
call[name[self]._parser.add_argument, parameter[constant[--test]]]
call[name[self]._parser.add_argument, parameter[constant[--use-cache]]]
|
keyword[def] identifier[_build_arguments] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_parser] . identifier[add_argument] (
literal[string] ,
identifier[type] = identifier[bool] ,
identifier[required] = keyword[False] ,
identifier[default] = keyword[False] ,
identifier[help] = literal[string]
)
identifier[self] . identifier[_parser] . identifier[add_argument] (
literal[string] ,
identifier[type] = identifier[bool] ,
identifier[required] = keyword[False] ,
identifier[default] = keyword[False] ,
identifier[help] = literal[string]
)
identifier[self] . identifier[_parser] . identifier[add_argument] (
literal[string] ,
identifier[type] = identifier[bool] ,
identifier[required] = keyword[False] ,
identifier[default] = keyword[False] ,
identifier[help] = literal[string]
)
identifier[self] . identifier[_parser] . identifier[add_argument] (
literal[string] , literal[string] ,
identifier[required] = keyword[False] ,
identifier[type] = identifier[str] ,
identifier[action] = literal[string] ,
identifier[default] = keyword[None] ,
identifier[help] = literal[string]
)
identifier[self] . identifier[_parser] . identifier[add_argument] (
literal[string] ,
identifier[type] = identifier[bool] ,
identifier[required] = keyword[False] ,
identifier[default] = keyword[False] ,
identifier[help] = literal[string]
)
identifier[self] . identifier[_parser] . identifier[add_argument] (
literal[string] ,
identifier[required] = keyword[False] ,
identifier[action] = literal[string] ,
identifier[default] = keyword[False] ,
identifier[help] = literal[string]
)
|
def _build_arguments(self):
"""
build arguments for command.
"""
        self._parser.add_argument('--attach', type=bool, required=False, default=False, help='Attach to container output?')
self._parser.add_argument('--clean', type=bool, required=False, default=False, help='clean up everything that was created by freight forwarder at the end.')
self._parser.add_argument('--configs', type=bool, required=False, default=False, help='Would you like to inject configuration files?')
self._parser.add_argument('-e', '--env', required=False, type=str, action='append', default=None, help='environment variables to create in the container at run time.')
self._parser.add_argument('--test', type=bool, required=False, default=False, help='Run tests.')
self._parser.add_argument('--use-cache', required=False, action='store_true', default=False, help='Allow build to use cached image layers.')
|
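A caveat on the argparse declarations above: type=bool does not parse strings the way it reads, since bool('False') is True, so any value typed after such an option comes out truthy. A hedged sketch of the conventional alternative, a store_true flag:

import argparse

parser = argparse.ArgumentParser()
# A real on/off switch: present means True, absent means the default.
parser.add_argument('--attach', action='store_true', default=False,
                    help='Attach to container output?')
args = parser.parse_args(['--attach'])
assert args.attach is True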
def command_callback(result=None):
"""
:type result: opendnp3.ICommandTaskResult
"""
print("Received command result with summary: {}".format(opendnp3.TaskCompletionToString(result.summary)))
result.ForeachItem(collection_callback)
|
def function[command_callback, parameter[result]]:
constant[
:type result: opendnp3.ICommandTaskResult
]
call[name[print], parameter[call[constant[Received command result with summary: {}].format, parameter[call[name[opendnp3].TaskCompletionToString, parameter[name[result].summary]]]]]]
call[name[result].ForeachItem, parameter[name[collection_callback]]]
|
keyword[def] identifier[command_callback] ( identifier[result] = keyword[None] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] ( identifier[opendnp3] . identifier[TaskCompletionToString] ( identifier[result] . identifier[summary] )))
identifier[result] . identifier[ForeachItem] ( identifier[collection_callback] )
|
def command_callback(result=None):
"""
:type result: opendnp3.ICommandTaskResult
"""
print('Received command result with summary: {}'.format(opendnp3.TaskCompletionToString(result.summary)))
result.ForeachItem(collection_callback)
|
def _set_all_tables(self, schema, **kwargs):
"""
You can run into a problem when you are trying to set a table and it has a
foreign key to a table that doesn't exist, so this method will go through
all fk refs and make sure the tables exist
"""
with self.transaction(**kwargs) as connection:
kwargs['connection'] = connection
# go through and make sure all foreign key referenced tables exist
for field_name, field_val in schema.fields.items():
s = field_val.schema
if s:
self._set_all_tables(s, **kwargs)
# now that we know all fk tables exist, create this table
self.set_table(schema, **kwargs)
return True
|
def function[_set_all_tables, parameter[self, schema]]:
constant[
You can run into a problem when you are trying to set a table and it has a
foreign key to a table that doesn't exist, so this method will go through
all fk refs and make sure the tables exist
]
with call[name[self].transaction, parameter[]] begin[:]
call[name[kwargs]][constant[connection]] assign[=] name[connection]
for taget[tuple[[<ast.Name object at 0x7da1b1a1fd00>, <ast.Name object at 0x7da1b1a1ce20>]]] in starred[call[name[schema].fields.items, parameter[]]] begin[:]
variable[s] assign[=] name[field_val].schema
if name[s] begin[:]
call[name[self]._set_all_tables, parameter[name[s]]]
call[name[self].set_table, parameter[name[schema]]]
return[constant[True]]
|
keyword[def] identifier[_set_all_tables] ( identifier[self] , identifier[schema] ,** identifier[kwargs] ):
literal[string]
keyword[with] identifier[self] . identifier[transaction] (** identifier[kwargs] ) keyword[as] identifier[connection] :
identifier[kwargs] [ literal[string] ]= identifier[connection]
keyword[for] identifier[field_name] , identifier[field_val] keyword[in] identifier[schema] . identifier[fields] . identifier[items] ():
identifier[s] = identifier[field_val] . identifier[schema]
keyword[if] identifier[s] :
identifier[self] . identifier[_set_all_tables] ( identifier[s] ,** identifier[kwargs] )
identifier[self] . identifier[set_table] ( identifier[schema] ,** identifier[kwargs] )
keyword[return] keyword[True]
|
def _set_all_tables(self, schema, **kwargs):
"""
You can run into a problem when you are trying to set a table and it has a
foreign key to a table that doesn't exist, so this method will go through
all fk refs and make sure the tables exist
"""
with self.transaction(**kwargs) as connection:
kwargs['connection'] = connection
# go through and make sure all foreign key referenced tables exist
for (field_name, field_val) in schema.fields.items():
s = field_val.schema
if s:
self._set_all_tables(s, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# now that we know all fk tables exist, create this table
self.set_table(schema, **kwargs) # depends on [control=['with'], data=['connection']]
return True
|
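One assumption baked into _set_all_tables: the foreign-key graph is acyclic. Mutually referencing schemas would recurse without terminating, since no visited set is kept. A hedged sketch of a cycle-safe variant; the seen parameter and the hashability of schema objects are assumptions, not part of the source:

def _set_all_tables_safe(self, schema, seen=None, **kwargs):
    seen = set() if seen is None else seen
    if schema in seen:
        return True          # already scheduled; break the cycle
    seen.add(schema)
    with self.transaction(**kwargs) as connection:
        kwargs['connection'] = connection
        for field_name, field_val in schema.fields.items():
            if field_val.schema:
                self._set_all_tables_safe(field_val.schema, seen, **kwargs)
        self.set_table(schema, **kwargs)
    return True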
def set_cookie(name, value):
"""Sets a cookie and redirects to cookie list.
---
tags:
- Cookies
parameters:
- in: path
name: name
type: string
- in: path
name: value
type: string
produces:
- text/plain
responses:
200:
description: Set cookies and redirects to cookie list.
"""
r = app.make_response(redirect(url_for("view_cookies")))
r.set_cookie(key=name, value=value, secure=secure_cookie())
return r
|
def function[set_cookie, parameter[name, value]]:
constant[Sets a cookie and redirects to cookie list.
---
tags:
- Cookies
parameters:
- in: path
name: name
type: string
- in: path
name: value
type: string
produces:
- text/plain
responses:
200:
description: Set cookies and redirects to cookie list.
]
variable[r] assign[=] call[name[app].make_response, parameter[call[name[redirect], parameter[call[name[url_for], parameter[constant[view_cookies]]]]]]]
call[name[r].set_cookie, parameter[]]
return[name[r]]
|
keyword[def] identifier[set_cookie] ( identifier[name] , identifier[value] ):
literal[string]
identifier[r] = identifier[app] . identifier[make_response] ( identifier[redirect] ( identifier[url_for] ( literal[string] )))
identifier[r] . identifier[set_cookie] ( identifier[key] = identifier[name] , identifier[value] = identifier[value] , identifier[secure] = identifier[secure_cookie] ())
keyword[return] identifier[r]
|
def set_cookie(name, value):
"""Sets a cookie and redirects to cookie list.
---
tags:
- Cookies
parameters:
- in: path
name: name
type: string
- in: path
name: value
type: string
produces:
- text/plain
responses:
200:
description: Set cookies and redirects to cookie list.
"""
r = app.make_response(redirect(url_for('view_cookies')))
r.set_cookie(key=name, value=value, secure=secure_cookie())
return r
|
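A quick way to exercise a view like set_cookie is Flask's test client; the /cookies/set/<name>/<value> route below is inferred from the two in: path parameters in the Swagger block, not declared by the snippet itself:

with app.test_client() as client:
    resp = client.get('/cookies/set/theme/dark')
    assert resp.status_code == 302   # redirect to view_cookies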
def get_by_code(self, code):
"""
Retrieve a language by a code.
:param code: iso code (any of the three) or its culture code
:return: a Language object
"""
if any(x in code for x in ('_', '-')):
cc = CultureCode.objects.get(code=code.replace('_', '-'))
return cc.language
elif len(code) == 2:
return self.get(iso_639_1=code)
elif len(code) == 3:
return self.get(Q(iso_639_2T=code) |
Q(iso_639_2B=code) |
Q(iso_639_3=code))
raise ValueError(
            'Code must be either 2 or 3 characters: "%s" is %s' % (code, len(code)))
|
def function[get_by_code, parameter[self, code]]:
constant[
Retrieve a language by a code.
:param code: iso code (any of the three) or its culture code
:return: a Language object
]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b253e380>]] begin[:]
variable[cc] assign[=] call[name[CultureCode].objects.get, parameter[]]
return[name[cc].language]
<ast.Raise object at 0x7da1b253e470>
|
keyword[def] identifier[get_by_code] ( identifier[self] , identifier[code] ):
literal[string]
keyword[if] identifier[any] ( identifier[x] keyword[in] identifier[code] keyword[for] identifier[x] keyword[in] ( literal[string] , literal[string] )):
identifier[cc] = identifier[CultureCode] . identifier[objects] . identifier[get] ( identifier[code] = identifier[code] . identifier[replace] ( literal[string] , literal[string] ))
keyword[return] identifier[cc] . identifier[language]
keyword[elif] identifier[len] ( identifier[code] )== literal[int] :
keyword[return] identifier[self] . identifier[get] ( identifier[iso_639_1] = identifier[code] )
keyword[elif] identifier[len] ( identifier[code] )== literal[int] :
keyword[return] identifier[self] . identifier[get] ( identifier[Q] ( identifier[iso_639_2T] = identifier[code] )|
identifier[Q] ( identifier[iso_639_2B] = identifier[code] )|
identifier[Q] ( identifier[iso_639_3] = identifier[code] ))
keyword[raise] identifier[ValueError] (
literal[string] %( identifier[code] , identifier[len] ( identifier[code] )))
|
def get_by_code(self, code):
"""
Retrieve a language by a code.
:param code: iso code (any of the three) or its culture code
:return: a Language object
"""
if any((x in code for x in ('_', '-'))):
cc = CultureCode.objects.get(code=code.replace('_', '-'))
return cc.language # depends on [control=['if'], data=[]]
elif len(code) == 2:
return self.get(iso_639_1=code) # depends on [control=['if'], data=[]]
elif len(code) == 3:
return self.get(Q(iso_639_2T=code) | Q(iso_639_2B=code) | Q(iso_639_3=code)) # depends on [control=['if'], data=[]]
    raise ValueError('Code must be either 2 or 3 characters: "%s" is %s' % (code, len(code)))
|
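Assuming get_by_code sits on a custom Django manager (say, Language.objects), all of the following would resolve the same language; the data is illustrative:

Language.objects.get_by_code('en')      # two characters: ISO 639-1
Language.objects.get_by_code('eng')     # three characters: ISO 639-2/3
Language.objects.get_by_code('en-GB')   # culture code; 'en_GB' is normalized too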
def set_position(cls, resource_id, to_position, db_session=None, *args, **kwargs):
"""
Sets node position for new node in the tree
:param resource_id: resource to move
:param to_position: new position
:param db_session:
    :return: True
"""
db_session = get_db_session(db_session)
# lets lock rows to prevent bad tree states
resource = ResourceService.lock_resource_for_update(
resource_id=resource_id, db_session=db_session
)
cls.check_node_position(
resource.parent_id, to_position, on_same_branch=True, db_session=db_session
)
cls.shift_ordering_up(resource.parent_id, to_position, db_session=db_session)
db_session.flush()
db_session.expire(resource)
resource.ordering = to_position
return True
|
def function[set_position, parameter[cls, resource_id, to_position, db_session]]:
constant[
Sets node position for new node in the tree
:param resource_id: resource to move
:param to_position: new position
:param db_session:
    :return: True
]
variable[db_session] assign[=] call[name[get_db_session], parameter[name[db_session]]]
variable[resource] assign[=] call[name[ResourceService].lock_resource_for_update, parameter[]]
call[name[cls].check_node_position, parameter[name[resource].parent_id, name[to_position]]]
call[name[cls].shift_ordering_up, parameter[name[resource].parent_id, name[to_position]]]
call[name[db_session].flush, parameter[]]
call[name[db_session].expire, parameter[name[resource]]]
name[resource].ordering assign[=] name[to_position]
return[constant[True]]
|
keyword[def] identifier[set_position] ( identifier[cls] , identifier[resource_id] , identifier[to_position] , identifier[db_session] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[db_session] = identifier[get_db_session] ( identifier[db_session] )
identifier[resource] = identifier[ResourceService] . identifier[lock_resource_for_update] (
identifier[resource_id] = identifier[resource_id] , identifier[db_session] = identifier[db_session]
)
identifier[cls] . identifier[check_node_position] (
identifier[resource] . identifier[parent_id] , identifier[to_position] , identifier[on_same_branch] = keyword[True] , identifier[db_session] = identifier[db_session]
)
identifier[cls] . identifier[shift_ordering_up] ( identifier[resource] . identifier[parent_id] , identifier[to_position] , identifier[db_session] = identifier[db_session] )
identifier[db_session] . identifier[flush] ()
identifier[db_session] . identifier[expire] ( identifier[resource] )
identifier[resource] . identifier[ordering] = identifier[to_position]
keyword[return] keyword[True]
|
def set_position(cls, resource_id, to_position, db_session=None, *args, **kwargs):
"""
Sets node position for new node in the tree
:param resource_id: resource to move
:param to_position: new position
:param db_session:
    :return: True
"""
db_session = get_db_session(db_session)
# lets lock rows to prevent bad tree states
resource = ResourceService.lock_resource_for_update(resource_id=resource_id, db_session=db_session)
cls.check_node_position(resource.parent_id, to_position, on_same_branch=True, db_session=db_session)
cls.shift_ordering_up(resource.parent_id, to_position, db_session=db_session)
db_session.flush()
db_session.expire(resource)
resource.ordering = to_position
return True
|
def delete_row_range(self, format_str, start_game, end_game):
"""Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
"""
row_keys = make_single_array(
self.tf_table.keys_by_range_dataset(
format_str.format(start_game),
format_str.format(end_game)))
row_keys = list(row_keys)
if not row_keys:
utils.dbg('No rows left for games %d..%d' % (
start_game, end_game))
return
utils.dbg('Deleting %d rows: %s..%s' % (
len(row_keys), row_keys[0], row_keys[-1]))
# Reverse the keys so that the queue is left in a more
# sensible end state if you change your mind (say, due to a
# mistake in the timestamp) and abort the process: there will
# be a bit trimmed from the end, rather than a bit
# trimmed out of the middle.
row_keys.reverse()
total_keys = len(row_keys)
utils.dbg('Deleting total of %d keys' % total_keys)
concurrency = min(MAX_BT_CONCURRENCY,
multiprocessing.cpu_count() * 2)
with multiprocessing.Pool(processes=concurrency) as pool:
batches = []
with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,
row_keys):
pbar.update(len(b))
batches.append((self.btspec, b))
if len(batches) >= concurrency:
pool.map(_delete_rows, batches)
batches = []
pool.map(_delete_rows, batches)
batches = []
|
def function[delete_row_range, parameter[self, format_str, start_game, end_game]]:
constant[Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
]
variable[row_keys] assign[=] call[name[make_single_array], parameter[call[name[self].tf_table.keys_by_range_dataset, parameter[call[name[format_str].format, parameter[name[start_game]]], call[name[format_str].format, parameter[name[end_game]]]]]]]
variable[row_keys] assign[=] call[name[list], parameter[name[row_keys]]]
if <ast.UnaryOp object at 0x7da20e955240> begin[:]
call[name[utils].dbg, parameter[binary_operation[constant[No rows left for games %d..%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e954550>, <ast.Name object at 0x7da20e957940>]]]]]
return[None]
call[name[utils].dbg, parameter[binary_operation[constant[Deleting %d rows: %s..%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20e957340>, <ast.Subscript object at 0x7da20e955c00>, <ast.Subscript object at 0x7da20e9577c0>]]]]]
call[name[row_keys].reverse, parameter[]]
variable[total_keys] assign[=] call[name[len], parameter[name[row_keys]]]
call[name[utils].dbg, parameter[binary_operation[constant[Deleting total of %d keys] <ast.Mod object at 0x7da2590d6920> name[total_keys]]]]
variable[concurrency] assign[=] call[name[min], parameter[name[MAX_BT_CONCURRENCY], binary_operation[call[name[multiprocessing].cpu_count, parameter[]] * constant[2]]]]
with call[name[multiprocessing].Pool, parameter[]] begin[:]
variable[batches] assign[=] list[[]]
with call[name[tqdm], parameter[]] begin[:]
for taget[name[b]] in starred[call[name[utils].iter_chunks, parameter[name[bigtable].row.MAX_MUTATIONS, name[row_keys]]]] begin[:]
call[name[pbar].update, parameter[call[name[len], parameter[name[b]]]]]
call[name[batches].append, parameter[tuple[[<ast.Attribute object at 0x7da20cabd270>, <ast.Name object at 0x7da20cabd180>]]]]
if compare[call[name[len], parameter[name[batches]]] greater_or_equal[>=] name[concurrency]] begin[:]
call[name[pool].map, parameter[name[_delete_rows], name[batches]]]
variable[batches] assign[=] list[[]]
call[name[pool].map, parameter[name[_delete_rows], name[batches]]]
variable[batches] assign[=] list[[]]
|
keyword[def] identifier[delete_row_range] ( identifier[self] , identifier[format_str] , identifier[start_game] , identifier[end_game] ):
literal[string]
identifier[row_keys] = identifier[make_single_array] (
identifier[self] . identifier[tf_table] . identifier[keys_by_range_dataset] (
identifier[format_str] . identifier[format] ( identifier[start_game] ),
identifier[format_str] . identifier[format] ( identifier[end_game] )))
identifier[row_keys] = identifier[list] ( identifier[row_keys] )
keyword[if] keyword[not] identifier[row_keys] :
identifier[utils] . identifier[dbg] ( literal[string] %(
identifier[start_game] , identifier[end_game] ))
keyword[return]
identifier[utils] . identifier[dbg] ( literal[string] %(
identifier[len] ( identifier[row_keys] ), identifier[row_keys] [ literal[int] ], identifier[row_keys] [- literal[int] ]))
identifier[row_keys] . identifier[reverse] ()
identifier[total_keys] = identifier[len] ( identifier[row_keys] )
identifier[utils] . identifier[dbg] ( literal[string] % identifier[total_keys] )
identifier[concurrency] = identifier[min] ( identifier[MAX_BT_CONCURRENCY] ,
identifier[multiprocessing] . identifier[cpu_count] ()* literal[int] )
keyword[with] identifier[multiprocessing] . identifier[Pool] ( identifier[processes] = identifier[concurrency] ) keyword[as] identifier[pool] :
identifier[batches] =[]
keyword[with] identifier[tqdm] ( identifier[desc] = literal[string] , identifier[unit_scale] = literal[int] , identifier[total] = identifier[total_keys] ) keyword[as] identifier[pbar] :
keyword[for] identifier[b] keyword[in] identifier[utils] . identifier[iter_chunks] ( identifier[bigtable] . identifier[row] . identifier[MAX_MUTATIONS] ,
identifier[row_keys] ):
identifier[pbar] . identifier[update] ( identifier[len] ( identifier[b] ))
identifier[batches] . identifier[append] (( identifier[self] . identifier[btspec] , identifier[b] ))
keyword[if] identifier[len] ( identifier[batches] )>= identifier[concurrency] :
identifier[pool] . identifier[map] ( identifier[_delete_rows] , identifier[batches] )
identifier[batches] =[]
identifier[pool] . identifier[map] ( identifier[_delete_rows] , identifier[batches] )
identifier[batches] =[]
|
def delete_row_range(self, format_str, start_game, end_game):
"""Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
"""
row_keys = make_single_array(self.tf_table.keys_by_range_dataset(format_str.format(start_game), format_str.format(end_game)))
row_keys = list(row_keys)
if not row_keys:
utils.dbg('No rows left for games %d..%d' % (start_game, end_game))
return # depends on [control=['if'], data=[]]
utils.dbg('Deleting %d rows: %s..%s' % (len(row_keys), row_keys[0], row_keys[-1]))
# Reverse the keys so that the queue is left in a more
# sensible end state if you change your mind (say, due to a
# mistake in the timestamp) and abort the process: there will
# be a bit trimmed from the end, rather than a bit
# trimmed out of the middle.
row_keys.reverse()
total_keys = len(row_keys)
utils.dbg('Deleting total of %d keys' % total_keys)
concurrency = min(MAX_BT_CONCURRENCY, multiprocessing.cpu_count() * 2)
with multiprocessing.Pool(processes=concurrency) as pool:
batches = []
with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS, row_keys):
pbar.update(len(b))
batches.append((self.btspec, b))
if len(batches) >= concurrency:
pool.map(_delete_rows, batches)
batches = [] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['b']]
pool.map(_delete_rows, batches)
batches = [] # depends on [control=['with'], data=['pbar']] # depends on [control=['with'], data=['pool']]
|
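The chunking helper that delete_row_range leans on is not shown; a plausible implementation of utils.iter_chunks(chunk_size, iterable), consistent with how it is called above but strictly an assumption:

import itertools

def iter_chunks(chunk_size, iterable):
    # Yield successive lists of up to chunk_size items from iterable.
    it = iter(iterable)
    while True:
        chunk = list(itertools.islice(it, chunk_size))
        if not chunk:
            return
        yield chunk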
def relation_to_intermediary(fk):
"""Transform an SQLAlchemy ForeignKey object to it's intermediary representation. """
return Relation(
right_col=format_name(fk.parent.table.fullname),
left_col=format_name(fk._column_tokens[1]),
right_cardinality='?',
left_cardinality='*',
)
|
def function[relation_to_intermediary, parameter[fk]]:
    constant[Transform an SQLAlchemy ForeignKey object to its intermediary representation. ]
return[call[name[Relation], parameter[]]]
|
keyword[def] identifier[relation_to_intermediary] ( identifier[fk] ):
literal[string]
keyword[return] identifier[Relation] (
identifier[right_col] = identifier[format_name] ( identifier[fk] . identifier[parent] . identifier[table] . identifier[fullname] ),
identifier[left_col] = identifier[format_name] ( identifier[fk] . identifier[_column_tokens] [ literal[int] ]),
identifier[right_cardinality] = literal[string] ,
identifier[left_cardinality] = literal[string] ,
)
|
def relation_to_intermediary(fk):
"""Transform an SQLAlchemy ForeignKey object to it's intermediary representation. """
return Relation(right_col=format_name(fk.parent.table.fullname), left_col=format_name(fk._column_tokens[1]), right_cardinality='?', left_cardinality='*')
|
def dict_to_ddb(item):
# type: (Dict[str, Any]) -> Dict[str, Any]
# TODO: narrow these types down
"""Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict
"""
serializer = TypeSerializer()
return {key: serializer.serialize(value) for key, value in item.items()}
|
def function[dict_to_ddb, parameter[item]]:
constant[Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict
]
variable[serializer] assign[=] call[name[TypeSerializer], parameter[]]
return[<ast.DictComp object at 0x7da1b0b30d60>]
|
keyword[def] identifier[dict_to_ddb] ( identifier[item] ):
literal[string]
identifier[serializer] = identifier[TypeSerializer] ()
keyword[return] { identifier[key] : identifier[serializer] . identifier[serialize] ( identifier[value] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[item] . identifier[items] ()}
|
def dict_to_ddb(item):
# type: (Dict[str, Any]) -> Dict[str, Any]
# TODO: narrow these types down
    """Converts a native Python dictionary to a raw DynamoDB item.
    :param dict item: Native item
    :returns: DynamoDB item
    :rtype: dict
    """
serializer = TypeSerializer()
return {key: serializer.serialize(value) for (key, value) in item.items()}
|
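Assuming TypeSerializer here is boto3.dynamodb.types.TypeSerializer, a small worked example of dict_to_ddb; note that DynamoDB numbers must be int or Decimal, as the serializer rejects floats:

from decimal import Decimal

item = {'name': 'widget', 'count': 3, 'price': Decimal('9.99')}
print(dict_to_ddb(item))
# {'name': {'S': 'widget'}, 'count': {'N': '3'}, 'price': {'N': '9.99'}}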
def getpad(self):
"""Get pad value of DataBatch."""
if self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
return self.cursor + self.batch_size - self.num_data
# check the first batch
elif self.last_batch_handle == 'roll_over' and \
-self.batch_size < self.cursor < 0:
return -self.cursor
else:
return 0
|
def function[getpad, parameter[self]]:
constant[Get pad value of DataBatch.]
if <ast.BoolOp object at 0x7da1b200bfa0> begin[:]
return[binary_operation[binary_operation[name[self].cursor + name[self].batch_size] - name[self].num_data]]
|
keyword[def] identifier[getpad] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[last_batch_handle] == literal[string] keyword[and] identifier[self] . identifier[cursor] + identifier[self] . identifier[batch_size] > identifier[self] . identifier[num_data] :
keyword[return] identifier[self] . identifier[cursor] + identifier[self] . identifier[batch_size] - identifier[self] . identifier[num_data]
keyword[elif] identifier[self] . identifier[last_batch_handle] == literal[string] keyword[and] - identifier[self] . identifier[batch_size] < identifier[self] . identifier[cursor] < literal[int] :
keyword[return] - identifier[self] . identifier[cursor]
keyword[else] :
keyword[return] literal[int]
|
def getpad(self):
"""Get pad value of DataBatch."""
if self.last_batch_handle == 'pad' and self.cursor + self.batch_size > self.num_data:
return self.cursor + self.batch_size - self.num_data # depends on [control=['if'], data=[]]
# check the first batch
elif self.last_batch_handle == 'roll_over' and -self.batch_size < self.cursor < 0:
return -self.cursor # depends on [control=['if'], data=[]]
else:
return 0
|
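A worked example of the padding arithmetic in getpad: with num_data=10 and batch_size=4, the 'pad' branch fires on the last batch when cursor=8 and reports 8 + 4 - 10 = 2 filler samples; under 'roll_over', a cursor of -2 (two samples carried over from the previous epoch) makes the first batch report a pad of -(-2) = 2.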
def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER):
'''
Set the IPGrant list for the SMTP virtual server.
:param str addresses: A dictionary of IP + subnet pairs.
:param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
'''
setting = 'IPGrant'
formatted_addresses = list()
# It's okay to accept an empty list for set_connection_ip_list,
# since an empty list may be desirable.
if not addresses:
addresses = dict()
_LOG.debug('Empty %s specified.', setting)
# Convert addresses to the 'ip_address, subnet' format used by
# IIsIPSecuritySetting.
for address in addresses:
formatted_addresses.append('{0}, {1}'.format(address.strip(),
addresses[address].strip()))
current_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
# Order is not important, so compare to the current addresses as unordered sets.
if set(formatted_addresses) == set(current_addresses):
_LOG.debug('%s already contains the provided addresses.', setting)
return True
# First we should check GrantByDefault, and change it if necessary.
current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server)
if grant_by_default != current_grant_by_default:
_LOG.debug('Setting GrantByDefault to: %s', grant_by_default)
_set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server)
_set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server)
new_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
ret = set(formatted_addresses) == set(new_addresses)
if ret:
_LOG.debug('%s configured successfully: %s', setting, formatted_addresses)
return ret
_LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses)
return ret
|
def function[set_connection_ip_list, parameter[addresses, grant_by_default, server]]:
constant[
Set the IPGrant list for the SMTP virtual server.
:param str addresses: A dictionary of IP + subnet pairs.
:param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
]
variable[setting] assign[=] constant[IPGrant]
variable[formatted_addresses] assign[=] call[name[list], parameter[]]
if <ast.UnaryOp object at 0x7da1b1cae5c0> begin[:]
variable[addresses] assign[=] call[name[dict], parameter[]]
call[name[_LOG].debug, parameter[constant[Empty %s specified.], name[setting]]]
for taget[name[address]] in starred[name[addresses]] begin[:]
call[name[formatted_addresses].append, parameter[call[constant[{0}, {1}].format, parameter[call[name[address].strip, parameter[]], call[call[name[addresses]][name[address]].strip, parameter[]]]]]]
variable[current_addresses] assign[=] call[name[get_connection_ip_list], parameter[]]
if compare[call[name[set], parameter[name[formatted_addresses]]] equal[==] call[name[set], parameter[name[current_addresses]]]] begin[:]
call[name[_LOG].debug, parameter[constant[%s already contains the provided addresses.], name[setting]]]
return[constant[True]]
variable[current_grant_by_default] assign[=] call[name[_get_wmi_setting], parameter[constant[IIsIPSecuritySetting], constant[GrantByDefault], name[server]]]
if compare[name[grant_by_default] not_equal[!=] name[current_grant_by_default]] begin[:]
call[name[_LOG].debug, parameter[constant[Setting GrantByDefault to: %s], name[grant_by_default]]]
call[name[_set_wmi_setting], parameter[constant[IIsIPSecuritySetting], constant[GrantByDefault], name[grant_by_default], name[server]]]
call[name[_set_wmi_setting], parameter[constant[IIsIPSecuritySetting], name[setting], name[formatted_addresses], name[server]]]
variable[new_addresses] assign[=] call[name[get_connection_ip_list], parameter[]]
variable[ret] assign[=] compare[call[name[set], parameter[name[formatted_addresses]]] equal[==] call[name[set], parameter[name[new_addresses]]]]
if name[ret] begin[:]
call[name[_LOG].debug, parameter[constant[%s configured successfully: %s], name[setting], name[formatted_addresses]]]
return[name[ret]]
call[name[_LOG].error, parameter[constant[Unable to configure %s with value: %s], name[setting], name[formatted_addresses]]]
return[name[ret]]
|
keyword[def] identifier[set_connection_ip_list] ( identifier[addresses] = keyword[None] , identifier[grant_by_default] = keyword[False] , identifier[server] = identifier[_DEFAULT_SERVER] ):
literal[string]
identifier[setting] = literal[string]
identifier[formatted_addresses] = identifier[list] ()
keyword[if] keyword[not] identifier[addresses] :
identifier[addresses] = identifier[dict] ()
identifier[_LOG] . identifier[debug] ( literal[string] , identifier[setting] )
keyword[for] identifier[address] keyword[in] identifier[addresses] :
identifier[formatted_addresses] . identifier[append] ( literal[string] . identifier[format] ( identifier[address] . identifier[strip] (),
identifier[addresses] [ identifier[address] ]. identifier[strip] ()))
identifier[current_addresses] = identifier[get_connection_ip_list] ( identifier[as_wmi_format] = keyword[True] , identifier[server] = identifier[server] )
keyword[if] identifier[set] ( identifier[formatted_addresses] )== identifier[set] ( identifier[current_addresses] ):
identifier[_LOG] . identifier[debug] ( literal[string] , identifier[setting] )
keyword[return] keyword[True]
identifier[current_grant_by_default] = identifier[_get_wmi_setting] ( literal[string] , literal[string] , identifier[server] )
keyword[if] identifier[grant_by_default] != identifier[current_grant_by_default] :
identifier[_LOG] . identifier[debug] ( literal[string] , identifier[grant_by_default] )
identifier[_set_wmi_setting] ( literal[string] , literal[string] , identifier[grant_by_default] , identifier[server] )
identifier[_set_wmi_setting] ( literal[string] , identifier[setting] , identifier[formatted_addresses] , identifier[server] )
identifier[new_addresses] = identifier[get_connection_ip_list] ( identifier[as_wmi_format] = keyword[True] , identifier[server] = identifier[server] )
identifier[ret] = identifier[set] ( identifier[formatted_addresses] )== identifier[set] ( identifier[new_addresses] )
keyword[if] identifier[ret] :
identifier[_LOG] . identifier[debug] ( literal[string] , identifier[setting] , identifier[formatted_addresses] )
keyword[return] identifier[ret]
identifier[_LOG] . identifier[error] ( literal[string] , identifier[setting] , identifier[formatted_addresses] )
keyword[return] identifier[ret]
|
def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER):
"""
Set the IPGrant list for the SMTP virtual server.
:param str addresses: A dictionary of IP + subnet pairs.
:param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
"""
setting = 'IPGrant'
formatted_addresses = list()
# It's okay to accept an empty list for set_connection_ip_list,
# since an empty list may be desirable.
if not addresses:
addresses = dict()
_LOG.debug('Empty %s specified.', setting) # depends on [control=['if'], data=[]]
# Convert addresses to the 'ip_address, subnet' format used by
# IIsIPSecuritySetting.
for address in addresses:
formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) # depends on [control=['for'], data=['address']]
current_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
# Order is not important, so compare to the current addresses as unordered sets.
if set(formatted_addresses) == set(current_addresses):
_LOG.debug('%s already contains the provided addresses.', setting)
return True # depends on [control=['if'], data=[]]
# First we should check GrantByDefault, and change it if necessary.
current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server)
if grant_by_default != current_grant_by_default:
_LOG.debug('Setting GrantByDefault to: %s', grant_by_default)
_set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) # depends on [control=['if'], data=['grant_by_default']]
_set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server)
new_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
ret = set(formatted_addresses) == set(new_addresses)
if ret:
_LOG.debug('%s configured successfully: %s', setting, formatted_addresses)
return ret # depends on [control=['if'], data=[]]
_LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses)
return ret
|
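Tracing the CLI example above through set_connection_ip_list: the dictionary {'127.0.0.1': '255.255.255.255'} is flattened to the single WMI-style entry '127.0.0.1, 255.255.255.255', and it is this list that gets compared against the current settings, written, and read back to decide success.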
def pop_smallest(self):
"""Return the item with the lowest priority and remove it.
Raises IndexError if the object is empty.
"""
heap = self._heap
v, k = heappop(heap)
while k not in self or self[k] != v:
v, k = heappop(heap)
del self[k]
return k
|
def function[pop_smallest, parameter[self]]:
constant[Return the item with the lowest priority and remove it.
Raises IndexError if the object is empty.
]
variable[heap] assign[=] name[self]._heap
<ast.Tuple object at 0x7da1b1b0cca0> assign[=] call[name[heappop], parameter[name[heap]]]
while <ast.BoolOp object at 0x7da1b1b0c250> begin[:]
<ast.Tuple object at 0x7da1b1b0e890> assign[=] call[name[heappop], parameter[name[heap]]]
<ast.Delete object at 0x7da1b1b0e020>
return[name[k]]
|
keyword[def] identifier[pop_smallest] ( identifier[self] ):
literal[string]
identifier[heap] = identifier[self] . identifier[_heap]
identifier[v] , identifier[k] = identifier[heappop] ( identifier[heap] )
keyword[while] identifier[k] keyword[not] keyword[in] identifier[self] keyword[or] identifier[self] [ identifier[k] ]!= identifier[v] :
identifier[v] , identifier[k] = identifier[heappop] ( identifier[heap] )
keyword[del] identifier[self] [ identifier[k] ]
keyword[return] identifier[k]
|
def pop_smallest(self):
"""Return the item with the lowest priority and remove it.
Raises IndexError if the object is empty.
"""
heap = self._heap
(v, k) = heappop(heap)
while k not in self or self[k] != v:
(v, k) = heappop(heap) # depends on [control=['while'], data=[]]
del self[k]
return k
|
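This is the lazy-deletion trick for heap-backed priority dicts: updating a key leaves the old (priority, key) pair in the heap, and pops simply skip entries that no longer match the dict. A self-contained sketch of what the surrounding class might look like; its shape is an assumption, not part of the source:

from heapq import heappush, heappop

class PriorityDict(dict):
    # Hypothetical minimal container matching pop_smallest's expectations.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._heap = sorted((v, k) for k, v in self.items())  # a sorted list is a valid heap
    def __setitem__(self, key, val):
        super().__setitem__(key, val)
        heappush(self._heap, (val, key))  # stale entries are skipped on pop

pd = PriorityDict(a=3, b=1)
pd['a'] = 0                     # leaves a stale (3, 'a') pair behind
assert pop_smallest(pd) == 'a'  # calling the function above directly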
def surf_keep_cortex(surf, cortex):
"""
Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex.
Inputs
-------
    surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each row of the first array specifies the x, y, z
           coordinates of one node of the surface mesh. Each row of the second array specifies the indices of the three
           nodes building one triangle of the surface mesh.
           (e.g. the output from nibabel.freesurfer.io.read_geometry)
    cortex : Array with indices of vertices included within the cortex.
(e.g. the output from nibabel.freesurfer.io.read_label)
"""
# split surface into vertices and triangles
vertices, triangles = surf
# keep only the vertices within the cortex label
cortex_vertices = np.array(vertices[cortex], dtype=np.float64)
# keep only the triangles within the cortex label
cortex_triangles = triangles_keep_cortex(triangles, cortex)
return cortex_vertices, cortex_triangles
|
def function[surf_keep_cortex, parameter[surf, cortex]]:
constant[
Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex.
Inputs
-------
    surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each row of the first array specifies the x, y, z
           coordinates of one node of the surface mesh. Each row of the second array specifies the indices of the three
           nodes building one triangle of the surface mesh.
           (e.g. the output from nibabel.freesurfer.io.read_geometry)
    cortex : Array with indices of vertices included within the cortex.
(e.g. the output from nibabel.freesurfer.io.read_label)
]
<ast.Tuple object at 0x7da18dc9bcd0> assign[=] name[surf]
variable[cortex_vertices] assign[=] call[name[np].array, parameter[call[name[vertices]][name[cortex]]]]
variable[cortex_triangles] assign[=] call[name[triangles_keep_cortex], parameter[name[triangles], name[cortex]]]
return[tuple[[<ast.Name object at 0x7da18dc98940>, <ast.Name object at 0x7da18dc987f0>]]]
|
keyword[def] identifier[surf_keep_cortex] ( identifier[surf] , identifier[cortex] ):
literal[string]
identifier[vertices] , identifier[triangles] = identifier[surf]
identifier[cortex_vertices] = identifier[np] . identifier[array] ( identifier[vertices] [ identifier[cortex] ], identifier[dtype] = identifier[np] . identifier[float64] )
identifier[cortex_triangles] = identifier[triangles_keep_cortex] ( identifier[triangles] , identifier[cortex] )
keyword[return] identifier[cortex_vertices] , identifier[cortex_triangles]
|
def surf_keep_cortex(surf, cortex):
"""
Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex.
Inputs
-------
    surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each row of the first array specifies the x, y, z
           coordinates of one node of the surface mesh. Each row of the second array specifies the indices of the three
           nodes building one triangle of the surface mesh.
           (e.g. the output from nibabel.freesurfer.io.read_geometry)
    cortex : Array with indices of vertices included within the cortex.
(e.g. the output from nibabel.freesurfer.io.read_label)
"""
# split surface into vertices and triangles
(vertices, triangles) = surf
# keep only the vertices within the cortex label
cortex_vertices = np.array(vertices[cortex], dtype=np.float64)
# keep only the triangles within the cortex label
cortex_triangles = triangles_keep_cortex(triangles, cortex)
return (cortex_vertices, cortex_triangles)
|
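Hypothetical usage of surf_keep_cortex with nibabel's FreeSurfer readers; the file paths are placeholders:

import nibabel.freesurfer.io as fsio

surf = fsio.read_geometry('lh.white')         # -> (vertices, triangles)
cortex = fsio.read_label('lh.cortex.label')   # -> vertex indices in cortex
cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)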
def _updateTargetFromNode(self):
""" Applies the configuration to its target axis
"""
self.viewBox.setAspectLocked(lock=self.configValue, ratio=self.aspectRatioCti.configValue)
|
def function[_updateTargetFromNode, parameter[self]]:
constant[ Applies the configuration to its target axis
]
call[name[self].viewBox.setAspectLocked, parameter[]]
|
keyword[def] identifier[_updateTargetFromNode] ( identifier[self] ):
literal[string]
identifier[self] . identifier[viewBox] . identifier[setAspectLocked] ( identifier[lock] = identifier[self] . identifier[configValue] , identifier[ratio] = identifier[self] . identifier[aspectRatioCti] . identifier[configValue] )
|
def _updateTargetFromNode(self):
""" Applies the configuration to its target axis
"""
self.viewBox.setAspectLocked(lock=self.configValue, ratio=self.aspectRatioCti.configValue)
|
def _validate_config(self):
"""Ensure at least one switch is configured"""
if len(cfg.CONF.ml2_arista.get('switch_info')) < 1:
msg = _('Required option - when "sec_group_support" is enabled, '
'at least one switch must be specified ')
LOG.exception(msg)
raise arista_exc.AristaConfigError(msg=msg)
|
def function[_validate_config, parameter[self]]:
constant[Ensure at least one switch is configured]
if compare[call[name[len], parameter[call[name[cfg].CONF.ml2_arista.get, parameter[constant[switch_info]]]]] less[<] constant[1]] begin[:]
variable[msg] assign[=] call[name[_], parameter[constant[Required option - when "sec_group_support" is enabled, at least one switch must be specified ]]]
call[name[LOG].exception, parameter[name[msg]]]
<ast.Raise object at 0x7da1b195a7d0>
|
keyword[def] identifier[_validate_config] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[cfg] . identifier[CONF] . identifier[ml2_arista] . identifier[get] ( literal[string] ))< literal[int] :
identifier[msg] = identifier[_] ( literal[string]
literal[string] )
identifier[LOG] . identifier[exception] ( identifier[msg] )
keyword[raise] identifier[arista_exc] . identifier[AristaConfigError] ( identifier[msg] = identifier[msg] )
|
def _validate_config(self):
"""Ensure at least one switch is configured"""
if len(cfg.CONF.ml2_arista.get('switch_info')) < 1:
msg = _('Required option - when "sec_group_support" is enabled, at least one switch must be specified ')
LOG.exception(msg)
raise arista_exc.AristaConfigError(msg=msg) # depends on [control=['if'], data=[]]
|
def weibull(target, seeds, shape, scale, loc):
r"""
Produces values from a Weibull distribution given a set of random numbers.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
seeds : string, optional
The dictionary key on the Geometry object containing random seed values
(between 0 and 1) to use in the statistical distribution.
shape : float
This controls the skewness of the distribution, with 'shape' < 1 giving
values clustered on the low end of the range with a long tail, and
'shape' > 1 giving a more symmetrical distribution.
scale : float
    This controls the width of the distribution with most of the values falling
below this number.
loc : float
Applies an offset to the distribution such that the smallest values are
above this number.
Examples
--------
The following code illustrates the inner workings of this function,
which uses the 'weibull_min' method of the scipy.stats module. This can
    be used to find suitable values of 'shape', 'scale' and 'loc'. Note that
'shape' is represented by 'c' in the actual function call.
>>> import scipy
>>> func = scipy.stats.weibull_min(c=1.5, scale=0.0001, loc=0)
>>> import matplotlib.pyplot as plt
>>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)
"""
seeds = target[seeds]
value = spts.weibull_min.ppf(q=seeds, c=shape, scale=scale, loc=loc)
return value
|
def function[weibull, parameter[target, seeds, shape, scale, loc]]:
constant[
Produces values from a Weibull distribution given a set of random numbers.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
seeds : string, optional
The dictionary key on the Geometry object containing random seed values
(between 0 and 1) to use in the statistical distribution.
shape : float
This controls the skewness of the distribution, with 'shape' < 1 giving
values clustered on the low end of the range with a long tail, and
'shape' > 1 giving a more symmetrical distribution.
scale : float
    This controls the width of the distribution with most of the values falling
below this number.
loc : float
Applies an offset to the distribution such that the smallest values are
above this number.
Examples
--------
The following code illustrates the inner workings of this function,
which uses the 'weibull_min' method of the scipy.stats module. This can
    be used to find suitable values of 'shape', 'scale' and 'loc'. Note that
'shape' is represented by 'c' in the actual function call.
>>> import scipy
>>> func = scipy.stats.weibull_min(c=1.5, scale=0.0001, loc=0)
>>> import matplotlib.pyplot as plt
>>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)
]
variable[seeds] assign[=] call[name[target]][name[seeds]]
variable[value] assign[=] call[name[spts].weibull_min.ppf, parameter[]]
return[name[value]]
|
keyword[def] identifier[weibull] ( identifier[target] , identifier[seeds] , identifier[shape] , identifier[scale] , identifier[loc] ):
literal[string]
identifier[seeds] = identifier[target] [ identifier[seeds] ]
identifier[value] = identifier[spts] . identifier[weibull_min] . identifier[ppf] ( identifier[q] = identifier[seeds] , identifier[c] = identifier[shape] , identifier[scale] = identifier[scale] , identifier[loc] = identifier[loc] )
keyword[return] identifier[value]
|
def weibull(target, seeds, shape, scale, loc):
"""
Produces values from a Weibull distribution given a set of random numbers.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
seeds : string, optional
The dictionary key on the Geometry object containing random seed values
(between 0 and 1) to use in the statistical distribution.
shape : float
This controls the skewness of the distribution, with 'shape' < 1 giving
values clustered on the low end of the range with a long tail, and
'shape' > 1 giving a more symmetrical distribution.
scale : float
    This controls the width of the distribution with most of the values falling
below this number.
loc : float
Applies an offset to the distribution such that the smallest values are
above this number.
Examples
--------
The following code illustrates the inner workings of this function,
which uses the 'weibull_min' method of the scipy.stats module. This can
    be used to find suitable values of 'shape', 'scale' and 'loc'. Note that
'shape' is represented by 'c' in the actual function call.
>>> import scipy
>>> func = scipy.stats.weibull_min(c=1.5, scale=0.0001, loc=0)
>>> import matplotlib.pyplot as plt
>>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)
"""
seeds = target[seeds]
value = spts.weibull_min.ppf(q=seeds, c=shape, scale=scale, loc=loc)
return value
|
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return np.insert(temperature.m, loc, interp_temp.m) * temperature.units
|
def function[_insert_lcl_level, parameter[pressure, temperature, lcl_pressure]]:
constant[Insert the LCL pressure into the profile.]
variable[interp_temp] assign[=] call[name[interpolate_1d], parameter[name[lcl_pressure], name[pressure], name[temperature]]]
variable[loc] assign[=] binary_operation[name[pressure].size - call[call[name[pressure]][<ast.Slice object at 0x7da1b22c7fd0>].searchsorted, parameter[name[lcl_pressure]]]]
return[binary_operation[call[name[np].insert, parameter[name[temperature].m, name[loc], name[interp_temp].m]] * name[temperature].units]]
|
keyword[def] identifier[_insert_lcl_level] ( identifier[pressure] , identifier[temperature] , identifier[lcl_pressure] ):
literal[string]
identifier[interp_temp] = identifier[interpolate_1d] ( identifier[lcl_pressure] , identifier[pressure] , identifier[temperature] )
identifier[loc] = identifier[pressure] . identifier[size] - identifier[pressure] [::- literal[int] ]. identifier[searchsorted] ( identifier[lcl_pressure] )
keyword[return] identifier[np] . identifier[insert] ( identifier[temperature] . identifier[m] , identifier[loc] , identifier[interp_temp] . identifier[m] )* identifier[temperature] . identifier[units]
|
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return np.insert(temperature.m, loc, interp_temp.m) * temperature.units
|
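
A standalone sketch of the searchsorted trick used above, with plain numpy arrays in place of MetPy's unit-carrying quantities and np.interp in place of interpolate_1d:

import numpy as np

pressure = np.array([1000., 900., 850., 700.])  # hPa, monotonically decreasing
temperature = np.array([25., 18., 14., 5.])
lcl_pressure = 920.
# searchsorted needs increasing values, so flip and map the index back
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
interp_temp = np.interp(lcl_pressure, pressure[::-1], temperature[::-1])
print(np.insert(temperature, loc, interp_temp))  # -> [25., 19.4, 18., 14., 5.]
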
def prepare(data):
"""Restructure/prepare data about commits for output."""
message = data.get("message")
sha = data.get("sha")
tree = data.get("tree")
tree_sha = tree.get("sha")
return {"message": message, "sha": sha, "tree": {"sha": tree_sha}}
|
def function[prepare, parameter[data]]:
constant[Restructure/prepare data about commits for output.]
variable[message] assign[=] call[name[data].get, parameter[constant[message]]]
variable[sha] assign[=] call[name[data].get, parameter[constant[sha]]]
variable[tree] assign[=] call[name[data].get, parameter[constant[tree]]]
variable[tree_sha] assign[=] call[name[tree].get, parameter[constant[sha]]]
return[dictionary[[<ast.Constant object at 0x7da20c992bf0>, <ast.Constant object at 0x7da20c991630>, <ast.Constant object at 0x7da20c990610>], [<ast.Name object at 0x7da20c992110>, <ast.Name object at 0x7da20c993bb0>, <ast.Dict object at 0x7da20c9902b0>]]]
|
keyword[def] identifier[prepare] ( identifier[data] ):
literal[string]
identifier[message] = identifier[data] . identifier[get] ( literal[string] )
identifier[sha] = identifier[data] . identifier[get] ( literal[string] )
identifier[tree] = identifier[data] . identifier[get] ( literal[string] )
identifier[tree_sha] = identifier[tree] . identifier[get] ( literal[string] )
keyword[return] { literal[string] : identifier[message] , literal[string] : identifier[sha] , literal[string] :{ literal[string] : identifier[tree_sha] }}
|
def prepare(data):
"""Restructure/prepare data about commits for output."""
message = data.get('message')
sha = data.get('sha')
tree = data.get('tree')
tree_sha = tree.get('sha')
return {'message': message, 'sha': sha, 'tree': {'sha': tree_sha}}
|
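
Calling prepare (as defined in the row above) on a commit-shaped dict shows the restructuring; the field values here are made up:

commit = {
    'sha': 'abc123',
    'message': 'Fix typo',
    'tree': {'sha': 'def456', 'url': 'https://example.invalid/tree'},
    'author': {'name': 'Ada'},  # dropped by prepare
}
print(prepare(commit))
# {'message': 'Fix typo', 'sha': 'abc123', 'tree': {'sha': 'def456'}}
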
def _http_request(self, method, url_path, headers=None, query_params=None, body_params=None, files=None, **kwargs):
"""
Method to do http requests.
:param method:
:param url_path:
:param headers:
:param body_params:
:param query_params:
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart
encoding upload.
``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')``
or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file.
    :param update_content_type: (bool) Update headers before performing the request, adding the Content-Type value
according to the rendered body. By default: True.
:return:
"""
host = kwargs.get('host', self.host)
proxy = kwargs.get('proxy', self.proxy)
renderer = kwargs.get('renderer', MultiPartRenderer() if files else self.default_renderer)
prefix_url_path = kwargs.get('prefix_url_path', self.prefix_url_path)
authentication_instances = kwargs.get('authentication_instances', self.authentication_instances)
url_path_format = kwargs.get('url_path_format', self.url_path_format)
update_content_type = kwargs.get('update_content_type', True)
redirect = kwargs.get('redirect', False)
if headers is None:
headers = self.default_headers()
context = HttpRequestContext(
host=host, proxy=proxy, method=method,
prefix_url_path=prefix_url_path,
url_path=url_path,
url_path_params=self.url_path_params,
url_path_format=url_path_format,
headers=headers,
query_params=query_params,
body_params=body_params,
files=files,
renderer=renderer,
response_class=self.response_class,
authentication_instances=authentication_instances,
update_content_type=update_content_type,
redirect=redirect
)
res = self.http_request_from_context(context)
self.cookie.update(res.cookie)
return res
|
def function[_http_request, parameter[self, method, url_path, headers, query_params, body_params, files]]:
constant[
Method to do http requests.
:param method:
:param url_path:
:param headers:
:param body_params:
:param query_params:
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart
encoding upload.
``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')``
or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file.
    :param update_content_type: (bool) Update headers before performing the request, adding the Content-Type value
according to the rendered body. By default: True.
:return:
]
variable[host] assign[=] call[name[kwargs].get, parameter[constant[host], name[self].host]]
variable[proxy] assign[=] call[name[kwargs].get, parameter[constant[proxy], name[self].proxy]]
variable[renderer] assign[=] call[name[kwargs].get, parameter[constant[renderer], <ast.IfExp object at 0x7da1b171f4c0>]]
variable[prefix_url_path] assign[=] call[name[kwargs].get, parameter[constant[prefix_url_path], name[self].prefix_url_path]]
variable[authentication_instances] assign[=] call[name[kwargs].get, parameter[constant[authentication_instances], name[self].authentication_instances]]
variable[url_path_format] assign[=] call[name[kwargs].get, parameter[constant[url_path_format], name[self].url_path_format]]
variable[update_content_type] assign[=] call[name[kwargs].get, parameter[constant[update_content_type], constant[True]]]
variable[redirect] assign[=] call[name[kwargs].get, parameter[constant[redirect], constant[False]]]
if compare[name[headers] is constant[None]] begin[:]
variable[headers] assign[=] call[name[self].default_headers, parameter[]]
variable[context] assign[=] call[name[HttpRequestContext], parameter[]]
variable[res] assign[=] call[name[self].http_request_from_context, parameter[name[context]]]
call[name[self].cookie.update, parameter[name[res].cookie]]
return[name[res]]
|
keyword[def] identifier[_http_request] ( identifier[self] , identifier[method] , identifier[url_path] , identifier[headers] = keyword[None] , identifier[query_params] = keyword[None] , identifier[body_params] = keyword[None] , identifier[files] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[host] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[host] )
identifier[proxy] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[proxy] )
identifier[renderer] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[MultiPartRenderer] () keyword[if] identifier[files] keyword[else] identifier[self] . identifier[default_renderer] )
identifier[prefix_url_path] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[prefix_url_path] )
identifier[authentication_instances] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[authentication_instances] )
identifier[url_path_format] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[url_path_format] )
identifier[update_content_type] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[True] )
identifier[redirect] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[headers] keyword[is] keyword[None] :
identifier[headers] = identifier[self] . identifier[default_headers] ()
identifier[context] = identifier[HttpRequestContext] (
identifier[host] = identifier[host] , identifier[proxy] = identifier[proxy] , identifier[method] = identifier[method] ,
identifier[prefix_url_path] = identifier[prefix_url_path] ,
identifier[url_path] = identifier[url_path] ,
identifier[url_path_params] = identifier[self] . identifier[url_path_params] ,
identifier[url_path_format] = identifier[url_path_format] ,
identifier[headers] = identifier[headers] ,
identifier[query_params] = identifier[query_params] ,
identifier[body_params] = identifier[body_params] ,
identifier[files] = identifier[files] ,
identifier[renderer] = identifier[renderer] ,
identifier[response_class] = identifier[self] . identifier[response_class] ,
identifier[authentication_instances] = identifier[authentication_instances] ,
identifier[update_content_type] = identifier[update_content_type] ,
identifier[redirect] = identifier[redirect]
)
identifier[res] = identifier[self] . identifier[http_request_from_context] ( identifier[context] )
identifier[self] . identifier[cookie] . identifier[update] ( identifier[res] . identifier[cookie] )
keyword[return] identifier[res]
|
def _http_request(self, method, url_path, headers=None, query_params=None, body_params=None, files=None, **kwargs):
"""
Method to do http requests.
:param method:
:param url_path:
:param headers:
:param body_params:
:param query_params:
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart
encoding upload.
``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')``
or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file.
    :param update_content_type: (bool) Update headers before performing the request, adding the Content-Type value
according to the rendered body. By default: True.
:return:
"""
host = kwargs.get('host', self.host)
proxy = kwargs.get('proxy', self.proxy)
renderer = kwargs.get('renderer', MultiPartRenderer() if files else self.default_renderer)
prefix_url_path = kwargs.get('prefix_url_path', self.prefix_url_path)
authentication_instances = kwargs.get('authentication_instances', self.authentication_instances)
url_path_format = kwargs.get('url_path_format', self.url_path_format)
update_content_type = kwargs.get('update_content_type', True)
redirect = kwargs.get('redirect', False)
if headers is None:
headers = self.default_headers() # depends on [control=['if'], data=['headers']]
context = HttpRequestContext(host=host, proxy=proxy, method=method, prefix_url_path=prefix_url_path, url_path=url_path, url_path_params=self.url_path_params, url_path_format=url_path_format, headers=headers, query_params=query_params, body_params=body_params, files=files, renderer=renderer, response_class=self.response_class, authentication_instances=authentication_instances, update_content_type=update_content_type, redirect=redirect)
res = self.http_request_from_context(context)
self.cookie.update(res.cookie)
return res
|
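
The kwargs.get(key, self.attr) pattern above lets any single call override a client-level default; a minimal standalone sketch of that resolution order (the class and values are hypothetical):

class Client:
    host = 'api.example.invalid'  # hypothetical client-level default

    def resolve(self, **kwargs):
        # per-call value wins, otherwise fall back to the instance default
        return kwargs.get('host', self.host)

c = Client()
print(c.resolve())                        # api.example.invalid
print(c.resolve(host='staging.invalid'))  # staging.invalid
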
def page_for(self, member, page_size=DEFAULT_PAGE_SIZE):
'''
Determine the page where a member falls in the leaderboard.
@param member [String] Member name.
@param page_size [int] Page size to be used in determining page location.
@return the page where a member falls in the leaderboard.
'''
return self.page_for_in(self.leaderboard_name, member, page_size)
|
def function[page_for, parameter[self, member, page_size]]:
constant[
Determine the page where a member falls in the leaderboard.
@param member [String] Member name.
@param page_size [int] Page size to be used in determining page location.
@return the page where a member falls in the leaderboard.
]
return[call[name[self].page_for_in, parameter[name[self].leaderboard_name, name[member], name[page_size]]]]
|
keyword[def] identifier[page_for] ( identifier[self] , identifier[member] , identifier[page_size] = identifier[DEFAULT_PAGE_SIZE] ):
literal[string]
keyword[return] identifier[self] . identifier[page_for_in] ( identifier[self] . identifier[leaderboard_name] , identifier[member] , identifier[page_size] )
|
def page_for(self, member, page_size=DEFAULT_PAGE_SIZE):
"""
Determine the page where a member falls in the leaderboard.
@param member [String] Member name.
@param page_size [int] Page size to be used in determining page location.
@return the page where a member falls in the leaderboard.
"""
return self.page_for_in(self.leaderboard_name, member, page_size)
|
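
The page lookup is delegated to page_for_in; assuming it uses the usual leaderboard arithmetic, the page is the ceiling division of a member's 1-based rank by the page size:

import math

DEFAULT_PAGE_SIZE = 25
for rank in (1, 25, 26, 51):
    print(rank, math.ceil(rank / DEFAULT_PAGE_SIZE))
# ranks 1 and 25 land on page 1, rank 26 on page 2, rank 51 on page 3
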
def copy(self, target=None, name=None):
""" Asynchronously creates a copy of this DriveItem and all it's
child elements.
:param target: target location to move to.
If it's a drive the item will be moved to the root folder.
:type target: drive.Folder or Drive
:param name: a new name for the copy.
:rtype: CopyOperation
"""
if target is None and name is None:
raise ValueError('Must provide a target or a name (or both)')
if isinstance(target, Folder):
target_id = target.object_id
drive_id = target.drive_id
elif isinstance(target, Drive):
# we need the root folder
root_folder = target.get_root_folder()
if not root_folder:
return None
target_id = root_folder.object_id
drive_id = root_folder.drive_id
elif target is None:
target_id = None
drive_id = None
else:
raise ValueError('Target, if provided, must be a Folder or Drive')
if not self.object_id:
return None
if target_id == 'root':
raise ValueError("When copying, target id can't be 'root'")
url = self.build_url(
self._endpoints.get('copy').format(id=self.object_id))
if target_id and drive_id:
data = {'parentReference': {'id': target_id, 'driveId': drive_id}}
else:
data = {}
if name:
# incorporate the extension if the name provided has none.
if not Path(name).suffix and self.name:
name = name + Path(self.name).suffix
data['name'] = name
response = self.con.post(url, data=data)
if not response:
return None
# Find out if the server has run a Sync or Async operation
location = response.headers.get('Location', None)
if 'monitor' in location:
# Async operation
return CopyOperation(parent=self.drive, monitor_url=location)
else:
# Sync operation. Item is ready to be retrieved
path = urlparse(location).path
item_id = path.split('/')[-1]
return CopyOperation(parent=self.drive, item_id=item_id)
|
def function[copy, parameter[self, target, name]]:
    constant[ Asynchronously creates a copy of this DriveItem and all its
    child elements.
    :param target: target location to copy to.
     If it's a drive the item will be copied to the root folder.
:type target: drive.Folder or Drive
:param name: a new name for the copy.
:rtype: CopyOperation
]
if <ast.BoolOp object at 0x7da1b1baf5e0> begin[:]
<ast.Raise object at 0x7da1b1bafc10>
if call[name[isinstance], parameter[name[target], name[Folder]]] begin[:]
variable[target_id] assign[=] name[target].object_id
variable[drive_id] assign[=] name[target].drive_id
if <ast.UnaryOp object at 0x7da1b1bae6e0> begin[:]
return[constant[None]]
if compare[name[target_id] equal[==] constant[root]] begin[:]
<ast.Raise object at 0x7da1b1baf7f0>
variable[url] assign[=] call[name[self].build_url, parameter[call[call[name[self]._endpoints.get, parameter[constant[copy]]].format, parameter[]]]]
if <ast.BoolOp object at 0x7da1b1baf190> begin[:]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1bac340>], [<ast.Dict object at 0x7da1b1bae4a0>]]
if name[name] begin[:]
if <ast.BoolOp object at 0x7da1b1bad780> begin[:]
variable[name] assign[=] binary_operation[name[name] + call[name[Path], parameter[name[self].name]].suffix]
call[name[data]][constant[name]] assign[=] name[name]
variable[response] assign[=] call[name[self].con.post, parameter[name[url]]]
if <ast.UnaryOp object at 0x7da1b1baeb90> begin[:]
return[constant[None]]
variable[location] assign[=] call[name[response].headers.get, parameter[constant[Location], constant[None]]]
if compare[constant[monitor] in name[location]] begin[:]
return[call[name[CopyOperation], parameter[]]]
|
keyword[def] identifier[copy] ( identifier[self] , identifier[target] = keyword[None] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[target] keyword[is] keyword[None] keyword[and] identifier[name] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[target] , identifier[Folder] ):
identifier[target_id] = identifier[target] . identifier[object_id]
identifier[drive_id] = identifier[target] . identifier[drive_id]
keyword[elif] identifier[isinstance] ( identifier[target] , identifier[Drive] ):
identifier[root_folder] = identifier[target] . identifier[get_root_folder] ()
keyword[if] keyword[not] identifier[root_folder] :
keyword[return] keyword[None]
identifier[target_id] = identifier[root_folder] . identifier[object_id]
identifier[drive_id] = identifier[root_folder] . identifier[drive_id]
keyword[elif] identifier[target] keyword[is] keyword[None] :
identifier[target_id] = keyword[None]
identifier[drive_id] = keyword[None]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[object_id] :
keyword[return] keyword[None]
keyword[if] identifier[target_id] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[url] = identifier[self] . identifier[build_url] (
identifier[self] . identifier[_endpoints] . identifier[get] ( literal[string] ). identifier[format] ( identifier[id] = identifier[self] . identifier[object_id] ))
keyword[if] identifier[target_id] keyword[and] identifier[drive_id] :
identifier[data] ={ literal[string] :{ literal[string] : identifier[target_id] , literal[string] : identifier[drive_id] }}
keyword[else] :
identifier[data] ={}
keyword[if] identifier[name] :
keyword[if] keyword[not] identifier[Path] ( identifier[name] ). identifier[suffix] keyword[and] identifier[self] . identifier[name] :
identifier[name] = identifier[name] + identifier[Path] ( identifier[self] . identifier[name] ). identifier[suffix]
identifier[data] [ literal[string] ]= identifier[name]
identifier[response] = identifier[self] . identifier[con] . identifier[post] ( identifier[url] , identifier[data] = identifier[data] )
keyword[if] keyword[not] identifier[response] :
keyword[return] keyword[None]
identifier[location] = identifier[response] . identifier[headers] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] literal[string] keyword[in] identifier[location] :
keyword[return] identifier[CopyOperation] ( identifier[parent] = identifier[self] . identifier[drive] , identifier[monitor_url] = identifier[location] )
keyword[else] :
identifier[path] = identifier[urlparse] ( identifier[location] ). identifier[path]
identifier[item_id] = identifier[path] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[return] identifier[CopyOperation] ( identifier[parent] = identifier[self] . identifier[drive] , identifier[item_id] = identifier[item_id] )
|
def copy(self, target=None, name=None):
""" Asynchronously creates a copy of this DriveItem and all it's
child elements.
:param target: target location to move to.
If it's a drive the item will be moved to the root folder.
:type target: drive.Folder or Drive
:param name: a new name for the copy.
:rtype: CopyOperation
"""
if target is None and name is None:
raise ValueError('Must provide a target or a name (or both)') # depends on [control=['if'], data=[]]
if isinstance(target, Folder):
target_id = target.object_id
drive_id = target.drive_id # depends on [control=['if'], data=[]]
elif isinstance(target, Drive):
# we need the root folder
root_folder = target.get_root_folder()
if not root_folder:
return None # depends on [control=['if'], data=[]]
target_id = root_folder.object_id
drive_id = root_folder.drive_id # depends on [control=['if'], data=[]]
elif target is None:
target_id = None
drive_id = None # depends on [control=['if'], data=[]]
else:
raise ValueError('Target, if provided, must be a Folder or Drive')
if not self.object_id:
return None # depends on [control=['if'], data=[]]
if target_id == 'root':
raise ValueError("When copying, target id can't be 'root'") # depends on [control=['if'], data=[]]
url = self.build_url(self._endpoints.get('copy').format(id=self.object_id))
if target_id and drive_id:
data = {'parentReference': {'id': target_id, 'driveId': drive_id}} # depends on [control=['if'], data=[]]
else:
data = {}
if name:
# incorporate the extension if the name provided has none.
if not Path(name).suffix and self.name:
name = name + Path(self.name).suffix # depends on [control=['if'], data=[]]
data['name'] = name # depends on [control=['if'], data=[]]
response = self.con.post(url, data=data)
if not response:
return None # depends on [control=['if'], data=[]]
# Find out if the server has run a Sync or Async operation
location = response.headers.get('Location', None)
if 'monitor' in location:
# Async operation
return CopyOperation(parent=self.drive, monitor_url=location) # depends on [control=['if'], data=['location']]
else:
# Sync operation. Item is ready to be retrieved
path = urlparse(location).path
item_id = path.split('/')[-1]
return CopyOperation(parent=self.drive, item_id=item_id)
|
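
The renaming branch above preserves the original extension when the new name lacks one; the pathlib logic in isolation (file names are made up):

from pathlib import Path

original = 'report.xlsx'
name = 'report-backup'
# incorporate the extension if the name provided has none
if not Path(name).suffix and original:
    name = name + Path(original).suffix
print(name)  # report-backup.xlsx
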
def unpack_char16(self, data):
"""
Unpack a CIM-XML string value of CIM type 'char16' and return it
as a unicode string object, or None.
data (unicode string): CIM-XML string value, or None (in which case
None is returned).
"""
if data is None:
return None
len_data = len(data)
if len_data == 0:
raise CIMXMLParseError(
"Char16 value is empty",
conn_id=self.conn_id)
if len_data > 1:
# More than one character, or one character from the UCS-4 set
# in a narrow Python build (which represents it using
# surrogates).
raise CIMXMLParseError(
_format("Char16 value has more than one UCS-2 "
"character: {0!A}", data),
conn_id=self.conn_id)
if ord(data) > 0xFFFF:
# One character from the UCS-4 set in a wide Python build.
raise CIMXMLParseError(
_format("Char16 value is a character outside of the "
"UCS-2 range: {0!A}", data),
conn_id=self.conn_id)
return data
|
def function[unpack_char16, parameter[self, data]]:
constant[
Unpack a CIM-XML string value of CIM type 'char16' and return it
as a unicode string object, or None.
data (unicode string): CIM-XML string value, or None (in which case
None is returned).
]
if compare[name[data] is constant[None]] begin[:]
return[constant[None]]
variable[len_data] assign[=] call[name[len], parameter[name[data]]]
if compare[name[len_data] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da204346320>
if compare[name[len_data] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da204346260>
if compare[call[name[ord], parameter[name[data]]] greater[>] constant[65535]] begin[:]
<ast.Raise object at 0x7da204347b20>
return[name[data]]
|
keyword[def] identifier[unpack_char16] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[len_data] = identifier[len] ( identifier[data] )
keyword[if] identifier[len_data] == literal[int] :
keyword[raise] identifier[CIMXMLParseError] (
literal[string] ,
identifier[conn_id] = identifier[self] . identifier[conn_id] )
keyword[if] identifier[len_data] > literal[int] :
keyword[raise] identifier[CIMXMLParseError] (
identifier[_format] ( literal[string]
literal[string] , identifier[data] ),
identifier[conn_id] = identifier[self] . identifier[conn_id] )
keyword[if] identifier[ord] ( identifier[data] )> literal[int] :
keyword[raise] identifier[CIMXMLParseError] (
identifier[_format] ( literal[string]
literal[string] , identifier[data] ),
identifier[conn_id] = identifier[self] . identifier[conn_id] )
keyword[return] identifier[data]
|
def unpack_char16(self, data):
"""
Unpack a CIM-XML string value of CIM type 'char16' and return it
as a unicode string object, or None.
data (unicode string): CIM-XML string value, or None (in which case
None is returned).
"""
if data is None:
return None # depends on [control=['if'], data=[]]
len_data = len(data)
if len_data == 0:
raise CIMXMLParseError('Char16 value is empty', conn_id=self.conn_id) # depends on [control=['if'], data=[]]
if len_data > 1:
# More than one character, or one character from the UCS-4 set
# in a narrow Python build (which represents it using
# surrogates).
raise CIMXMLParseError(_format('Char16 value has more than one UCS-2 character: {0!A}', data), conn_id=self.conn_id) # depends on [control=['if'], data=[]]
if ord(data) > 65535:
# One character from the UCS-4 set in a wide Python build.
raise CIMXMLParseError(_format('Char16 value is a character outside of the UCS-2 range: {0!A}', data), conn_id=self.conn_id) # depends on [control=['if'], data=[]]
return data
|
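
The three validation rules above boil down to length and code-point checks; mirrored here as a standalone sketch:

for data in ('A', '', 'AB', '\U0001F600'):
    if data == '':
        verdict = 'empty'
    elif len(data) > 1:
        verdict = 'more than one UCS-2 character'
    elif ord(data) > 0xFFFF:
        verdict = 'outside the UCS-2 range'
    else:
        verdict = 'valid char16'
    print(repr(data), verdict)
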
def _install_kraken_db(datadir, args):
"""Install kraken minimal DB in genome folder.
"""
import requests
kraken = os.path.join(datadir, "genomes/kraken")
url = "https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz"
compress = os.path.join(kraken, os.path.basename(url))
base, ext = utils.splitext_plus(os.path.basename(url))
db = os.path.join(kraken, base)
tooldir = args.tooldir or get_defaults()["tooldir"]
requests.packages.urllib3.disable_warnings()
last_mod = urllib.request.urlopen(url).info().get('Last-Modified')
last_mod = dateutil.parser.parse(last_mod).astimezone(dateutil.tz.tzutc())
if os.path.exists(os.path.join(tooldir, "bin", "kraken")):
if not os.path.exists(db):
is_new_version = True
else:
cur_file = glob.glob(os.path.join(kraken, "minikraken_*"))[0]
cur_version = datetime.datetime.utcfromtimestamp(os.path.getmtime(cur_file))
is_new_version = last_mod.date() > cur_version.date()
if is_new_version:
shutil.move(cur_file, cur_file.replace('minikraken', 'old'))
if not os.path.exists(kraken):
utils.safe_makedir(kraken)
if is_new_version:
if not os.path.exists(compress):
subprocess.check_call(["wget", "-O", compress, url, "--no-check-certificate"])
cmd = ["tar", "-xzvf", compress, "-C", kraken]
subprocess.check_call(cmd)
last_version = glob.glob(os.path.join(kraken, "minikraken_*"))
utils.symlink_plus(os.path.join(kraken, last_version[0]), os.path.join(kraken, "minikraken"))
utils.remove_safe(compress)
else:
print("You have the latest version %s." % last_mod)
else:
raise argparse.ArgumentTypeError("kraken not installed in tooldir %s." %
os.path.join(tooldir, "bin", "kraken"))
|
def function[_install_kraken_db, parameter[datadir, args]]:
constant[Install kraken minimal DB in genome folder.
]
import module[requests]
variable[kraken] assign[=] call[name[os].path.join, parameter[name[datadir], constant[genomes/kraken]]]
variable[url] assign[=] constant[https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz]
variable[compress] assign[=] call[name[os].path.join, parameter[name[kraken], call[name[os].path.basename, parameter[name[url]]]]]
<ast.Tuple object at 0x7da1b19b9e10> assign[=] call[name[utils].splitext_plus, parameter[call[name[os].path.basename, parameter[name[url]]]]]
variable[db] assign[=] call[name[os].path.join, parameter[name[kraken], name[base]]]
variable[tooldir] assign[=] <ast.BoolOp object at 0x7da1b19b9f30>
call[name[requests].packages.urllib3.disable_warnings, parameter[]]
variable[last_mod] assign[=] call[call[call[name[urllib].request.urlopen, parameter[name[url]]].info, parameter[]].get, parameter[constant[Last-Modified]]]
variable[last_mod] assign[=] call[call[name[dateutil].parser.parse, parameter[name[last_mod]]].astimezone, parameter[call[name[dateutil].tz.tzutc, parameter[]]]]
if call[name[os].path.exists, parameter[call[name[os].path.join, parameter[name[tooldir], constant[bin], constant[kraken]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b19bb670> begin[:]
variable[is_new_version] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da20c6e7e50> begin[:]
call[name[utils].safe_makedir, parameter[name[kraken]]]
if name[is_new_version] begin[:]
if <ast.UnaryOp object at 0x7da20c6e4e20> begin[:]
call[name[subprocess].check_call, parameter[list[[<ast.Constant object at 0x7da20c6e4190>, <ast.Constant object at 0x7da20c6e7c10>, <ast.Name object at 0x7da20c6e4dc0>, <ast.Name object at 0x7da20c6e74c0>, <ast.Constant object at 0x7da20c6e6c80>]]]]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da20c6e7c40>, <ast.Constant object at 0x7da20c6e5720>, <ast.Name object at 0x7da20c6e64a0>, <ast.Constant object at 0x7da20c6e56c0>, <ast.Name object at 0x7da20c6e7f10>]]
call[name[subprocess].check_call, parameter[name[cmd]]]
variable[last_version] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[kraken], constant[minikraken_*]]]]]
call[name[utils].symlink_plus, parameter[call[name[os].path.join, parameter[name[kraken], call[name[last_version]][constant[0]]]], call[name[os].path.join, parameter[name[kraken], constant[minikraken]]]]]
call[name[utils].remove_safe, parameter[name[compress]]]
|
keyword[def] identifier[_install_kraken_db] ( identifier[datadir] , identifier[args] ):
literal[string]
keyword[import] identifier[requests]
identifier[kraken] = identifier[os] . identifier[path] . identifier[join] ( identifier[datadir] , literal[string] )
identifier[url] = literal[string]
identifier[compress] = identifier[os] . identifier[path] . identifier[join] ( identifier[kraken] , identifier[os] . identifier[path] . identifier[basename] ( identifier[url] ))
identifier[base] , identifier[ext] = identifier[utils] . identifier[splitext_plus] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[url] ))
identifier[db] = identifier[os] . identifier[path] . identifier[join] ( identifier[kraken] , identifier[base] )
identifier[tooldir] = identifier[args] . identifier[tooldir] keyword[or] identifier[get_defaults] ()[ literal[string] ]
identifier[requests] . identifier[packages] . identifier[urllib3] . identifier[disable_warnings] ()
identifier[last_mod] = identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[url] ). identifier[info] (). identifier[get] ( literal[string] )
identifier[last_mod] = identifier[dateutil] . identifier[parser] . identifier[parse] ( identifier[last_mod] ). identifier[astimezone] ( identifier[dateutil] . identifier[tz] . identifier[tzutc] ())
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tooldir] , literal[string] , literal[string] )):
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[db] ):
identifier[is_new_version] = keyword[True]
keyword[else] :
identifier[cur_file] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[kraken] , literal[string] ))[ literal[int] ]
identifier[cur_version] = identifier[datetime] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[os] . identifier[path] . identifier[getmtime] ( identifier[cur_file] ))
identifier[is_new_version] = identifier[last_mod] . identifier[date] ()> identifier[cur_version] . identifier[date] ()
keyword[if] identifier[is_new_version] :
identifier[shutil] . identifier[move] ( identifier[cur_file] , identifier[cur_file] . identifier[replace] ( literal[string] , literal[string] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[kraken] ):
identifier[utils] . identifier[safe_makedir] ( identifier[kraken] )
keyword[if] identifier[is_new_version] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[compress] ):
identifier[subprocess] . identifier[check_call] ([ literal[string] , literal[string] , identifier[compress] , identifier[url] , literal[string] ])
identifier[cmd] =[ literal[string] , literal[string] , identifier[compress] , literal[string] , identifier[kraken] ]
identifier[subprocess] . identifier[check_call] ( identifier[cmd] )
identifier[last_version] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[kraken] , literal[string] ))
identifier[utils] . identifier[symlink_plus] ( identifier[os] . identifier[path] . identifier[join] ( identifier[kraken] , identifier[last_version] [ literal[int] ]), identifier[os] . identifier[path] . identifier[join] ( identifier[kraken] , literal[string] ))
identifier[utils] . identifier[remove_safe] ( identifier[compress] )
keyword[else] :
identifier[print] ( literal[string] % identifier[last_mod] )
keyword[else] :
keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( literal[string] %
identifier[os] . identifier[path] . identifier[join] ( identifier[tooldir] , literal[string] , literal[string] ))
|
def _install_kraken_db(datadir, args):
"""Install kraken minimal DB in genome folder.
"""
import requests
kraken = os.path.join(datadir, 'genomes/kraken')
url = 'https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz'
compress = os.path.join(kraken, os.path.basename(url))
(base, ext) = utils.splitext_plus(os.path.basename(url))
db = os.path.join(kraken, base)
tooldir = args.tooldir or get_defaults()['tooldir']
requests.packages.urllib3.disable_warnings()
last_mod = urllib.request.urlopen(url).info().get('Last-Modified')
last_mod = dateutil.parser.parse(last_mod).astimezone(dateutil.tz.tzutc())
if os.path.exists(os.path.join(tooldir, 'bin', 'kraken')):
if not os.path.exists(db):
is_new_version = True # depends on [control=['if'], data=[]]
else:
cur_file = glob.glob(os.path.join(kraken, 'minikraken_*'))[0]
cur_version = datetime.datetime.utcfromtimestamp(os.path.getmtime(cur_file))
is_new_version = last_mod.date() > cur_version.date()
if is_new_version:
shutil.move(cur_file, cur_file.replace('minikraken', 'old')) # depends on [control=['if'], data=[]]
if not os.path.exists(kraken):
utils.safe_makedir(kraken) # depends on [control=['if'], data=[]]
if is_new_version:
if not os.path.exists(compress):
subprocess.check_call(['wget', '-O', compress, url, '--no-check-certificate']) # depends on [control=['if'], data=[]]
cmd = ['tar', '-xzvf', compress, '-C', kraken]
subprocess.check_call(cmd)
last_version = glob.glob(os.path.join(kraken, 'minikraken_*'))
utils.symlink_plus(os.path.join(kraken, last_version[0]), os.path.join(kraken, 'minikraken'))
utils.remove_safe(compress) # depends on [control=['if'], data=[]]
else:
print('You have the latest version %s.' % last_mod) # depends on [control=['if'], data=[]]
else:
raise argparse.ArgumentTypeError('kraken not installed in tooldir %s.' % os.path.join(tooldir, 'bin', 'kraken'))
|
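
The staleness check above compares the server's Last-Modified header against the local file's mtime at day granularity; the date handling in isolation (the header value and mtime are made up):

import datetime
import dateutil.parser
import dateutil.tz

last_mod = dateutil.parser.parse('Wed, 01 May 2019 10:00:00 GMT')
last_mod = last_mod.astimezone(dateutil.tz.tzutc())
cur_version = datetime.datetime(2018, 11, 2, 12, 0)  # pretend local mtime
print(last_mod.date() > cur_version.date())  # True -> fetch the new DB
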
def explore_batch(traj, batch):
"""Chooses exploration according to `batch`"""
explore_dict = {}
explore_dict['sigma'] = np.arange(10.0 * batch, 10.0*(batch+1), 1.0).tolist()
# for batch = 0 explores sigma in [0.0, 1.0, 2.0, ..., 9.0],
# for batch = 1 explores sigma in [10.0, 11.0, 12.0, ..., 19.0]
# and so on
traj.f_explore(explore_dict)
|
def function[explore_batch, parameter[traj, batch]]:
constant[Chooses exploration according to `batch`]
variable[explore_dict] assign[=] dictionary[[], []]
call[name[explore_dict]][constant[sigma]] assign[=] call[call[name[np].arange, parameter[binary_operation[constant[10.0] * name[batch]], binary_operation[constant[10.0] * binary_operation[name[batch] + constant[1]]], constant[1.0]]].tolist, parameter[]]
call[name[traj].f_explore, parameter[name[explore_dict]]]
|
keyword[def] identifier[explore_batch] ( identifier[traj] , identifier[batch] ):
literal[string]
identifier[explore_dict] ={}
identifier[explore_dict] [ literal[string] ]= identifier[np] . identifier[arange] ( literal[int] * identifier[batch] , literal[int] *( identifier[batch] + literal[int] ), literal[int] ). identifier[tolist] ()
identifier[traj] . identifier[f_explore] ( identifier[explore_dict] )
|
def explore_batch(traj, batch):
"""Chooses exploration according to `batch`"""
explore_dict = {}
explore_dict['sigma'] = np.arange(10.0 * batch, 10.0 * (batch + 1), 1.0).tolist()
# for batch = 0 explores sigma in [0.0, 1.0, 2.0, ..., 9.0],
# for batch = 1 explores sigma in [10.0, 11.0, 12.0, ..., 19.0]
# and so on
traj.f_explore(explore_dict)
|
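
The batched sigma ranges the comment describes, printed for the first three batches:

import numpy as np

for batch in range(3):
    print(batch, np.arange(10.0 * batch, 10.0 * (batch + 1), 1.0).tolist())
# 0 -> [0.0 ... 9.0], 1 -> [10.0 ... 19.0], 2 -> [20.0 ... 29.0]
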
def changepassword(self, event):
"""An enrolled user wants to change their password"""
old = event.data['old']
new = event.data['new']
uuid = event.user.uuid
# TODO: Write email to notify user of password change
user = objectmodels['user'].find_one({'uuid': uuid})
if std_hash(old, self.salt) == user.passhash:
user.passhash = std_hash(new, self.salt)
user.save()
packet = {
'component': 'hfos.enrol.enrolmanager',
'action': 'changepassword',
'data': True
}
self.fireEvent(send(event.client.uuid, packet))
self.log('Successfully changed password for user', uuid)
else:
packet = {
'component': 'hfos.enrol.enrolmanager',
'action': 'changepassword',
'data': False
}
self.fireEvent(send(event.client.uuid, packet))
self.log('User tried to change password without supplying old one', lvl=warn)
|
def function[changepassword, parameter[self, event]]:
constant[An enrolled user wants to change their password]
variable[old] assign[=] call[name[event].data][constant[old]]
variable[new] assign[=] call[name[event].data][constant[new]]
variable[uuid] assign[=] name[event].user.uuid
variable[user] assign[=] call[call[name[objectmodels]][constant[user]].find_one, parameter[dictionary[[<ast.Constant object at 0x7da1b0f3bdc0>], [<ast.Name object at 0x7da1b0f3a710>]]]]
if compare[call[name[std_hash], parameter[name[old], name[self].salt]] equal[==] name[user].passhash] begin[:]
name[user].passhash assign[=] call[name[std_hash], parameter[name[new], name[self].salt]]
call[name[user].save, parameter[]]
variable[packet] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f455d0>, <ast.Constant object at 0x7da1b0f44c10>, <ast.Constant object at 0x7da1b0f47820>], [<ast.Constant object at 0x7da1b0f460b0>, <ast.Constant object at 0x7da1b0f46980>, <ast.Constant object at 0x7da1b0f45f60>]]
call[name[self].fireEvent, parameter[call[name[send], parameter[name[event].client.uuid, name[packet]]]]]
call[name[self].log, parameter[constant[Successfully changed password for user], name[uuid]]]
|
keyword[def] identifier[changepassword] ( identifier[self] , identifier[event] ):
literal[string]
identifier[old] = identifier[event] . identifier[data] [ literal[string] ]
identifier[new] = identifier[event] . identifier[data] [ literal[string] ]
identifier[uuid] = identifier[event] . identifier[user] . identifier[uuid]
identifier[user] = identifier[objectmodels] [ literal[string] ]. identifier[find_one] ({ literal[string] : identifier[uuid] })
keyword[if] identifier[std_hash] ( identifier[old] , identifier[self] . identifier[salt] )== identifier[user] . identifier[passhash] :
identifier[user] . identifier[passhash] = identifier[std_hash] ( identifier[new] , identifier[self] . identifier[salt] )
identifier[user] . identifier[save] ()
identifier[packet] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[True]
}
identifier[self] . identifier[fireEvent] ( identifier[send] ( identifier[event] . identifier[client] . identifier[uuid] , identifier[packet] ))
identifier[self] . identifier[log] ( literal[string] , identifier[uuid] )
keyword[else] :
identifier[packet] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[False]
}
identifier[self] . identifier[fireEvent] ( identifier[send] ( identifier[event] . identifier[client] . identifier[uuid] , identifier[packet] ))
identifier[self] . identifier[log] ( literal[string] , identifier[lvl] = identifier[warn] )
|
def changepassword(self, event):
"""An enrolled user wants to change their password"""
old = event.data['old']
new = event.data['new']
uuid = event.user.uuid
# TODO: Write email to notify user of password change
user = objectmodels['user'].find_one({'uuid': uuid})
if std_hash(old, self.salt) == user.passhash:
user.passhash = std_hash(new, self.salt)
user.save()
packet = {'component': 'hfos.enrol.enrolmanager', 'action': 'changepassword', 'data': True}
self.fireEvent(send(event.client.uuid, packet))
self.log('Successfully changed password for user', uuid) # depends on [control=['if'], data=[]]
else:
packet = {'component': 'hfos.enrol.enrolmanager', 'action': 'changepassword', 'data': False}
self.fireEvent(send(event.client.uuid, packet))
self.log('User tried to change password without supplying old one', lvl=warn)
|
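
A self-contained sketch of the salted-hash comparison that gates the change; std_hash here is a hypothetical stand-in for the module's helper:

import hashlib

def std_hash(value, salt):
    # hypothetical: the real std_hash lives elsewhere in the module
    return hashlib.sha512((value + salt).encode('utf-8')).hexdigest()

salt = 's3cr3t'
stored_passhash = std_hash('old-password', salt)
print(std_hash('old-password', salt) == stored_passhash)  # True -> allowed
print(std_hash('wrong-guess', salt) == stored_passhash)   # False -> rejected
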
def check(self, radl):
"""Check the features in this network."""
SIMPLE_FEATURES = {
"host": (str, None),
"credentials.username": (str, None),
"credentials.password": (str, None),
"credentials.private_key": (str, None)
}
self.check_simple(SIMPLE_FEATURES, radl)
if not self.getHost():
raise RADLParseException("Ansible host must have a host", line=self.line)
(username, password, private_key) = self.getCredentialValues()
if not username:
raise RADLParseException("Ansible host must have a credentials.username", line=self.line)
if not password and not private_key:
raise RADLParseException("Ansible host must have a credentials.password or credentials.private_key",
line=self.line)
|
def function[check, parameter[self, radl]]:
    constant[Check the features in this Ansible host.]
variable[SIMPLE_FEATURES] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ae07c0>, <ast.Constant object at 0x7da1b0ae1ea0>, <ast.Constant object at 0x7da1b0ae2290>, <ast.Constant object at 0x7da1b0ae2650>], [<ast.Tuple object at 0x7da1b0ae3ca0>, <ast.Tuple object at 0x7da1b0ae3730>, <ast.Tuple object at 0x7da1b0ae3d30>, <ast.Tuple object at 0x7da1b0ae3670>]]
call[name[self].check_simple, parameter[name[SIMPLE_FEATURES], name[radl]]]
if <ast.UnaryOp object at 0x7da20c7945b0> begin[:]
<ast.Raise object at 0x7da20c794af0>
<ast.Tuple object at 0x7da20c794c70> assign[=] call[name[self].getCredentialValues, parameter[]]
if <ast.UnaryOp object at 0x7da20c795c30> begin[:]
<ast.Raise object at 0x7da20c794880>
if <ast.BoolOp object at 0x7da20c796b30> begin[:]
<ast.Raise object at 0x7da20c794cd0>
|
keyword[def] identifier[check] ( identifier[self] , identifier[radl] ):
literal[string]
identifier[SIMPLE_FEATURES] ={
literal[string] :( identifier[str] , keyword[None] ),
literal[string] :( identifier[str] , keyword[None] ),
literal[string] :( identifier[str] , keyword[None] ),
literal[string] :( identifier[str] , keyword[None] )
}
identifier[self] . identifier[check_simple] ( identifier[SIMPLE_FEATURES] , identifier[radl] )
keyword[if] keyword[not] identifier[self] . identifier[getHost] ():
keyword[raise] identifier[RADLParseException] ( literal[string] , identifier[line] = identifier[self] . identifier[line] )
( identifier[username] , identifier[password] , identifier[private_key] )= identifier[self] . identifier[getCredentialValues] ()
keyword[if] keyword[not] identifier[username] :
keyword[raise] identifier[RADLParseException] ( literal[string] , identifier[line] = identifier[self] . identifier[line] )
keyword[if] keyword[not] identifier[password] keyword[and] keyword[not] identifier[private_key] :
keyword[raise] identifier[RADLParseException] ( literal[string] ,
identifier[line] = identifier[self] . identifier[line] )
|
def check(self, radl):
"""Check the features in this network."""
SIMPLE_FEATURES = {'host': (str, None), 'credentials.username': (str, None), 'credentials.password': (str, None), 'credentials.private_key': (str, None)}
self.check_simple(SIMPLE_FEATURES, radl)
if not self.getHost():
raise RADLParseException('Ansible host must have a host', line=self.line) # depends on [control=['if'], data=[]]
(username, password, private_key) = self.getCredentialValues()
if not username:
raise RADLParseException('Ansible host must have a credentials.username', line=self.line) # depends on [control=['if'], data=[]]
if not password and (not private_key):
raise RADLParseException('Ansible host must have a credentials.password or credentials.private_key', line=self.line) # depends on [control=['if'], data=[]]
|
def setProfile(self, profile):
"""
Sets the profile linked with this action.
:param profile | <projexui.widgets.xviewwidget.XViewProfile>
"""
self._profile = profile
# update the interface
self.setIcon(profile.icon())
self.setText(profile.name())
self.setToolTip(profile.description())
|
def function[setProfile, parameter[self, profile]]:
constant[
Sets the profile linked with this action.
:param profile | <projexui.widgets.xviewwidget.XViewProfile>
]
name[self]._profile assign[=] name[profile]
call[name[self].setIcon, parameter[call[name[profile].icon, parameter[]]]]
call[name[self].setText, parameter[call[name[profile].name, parameter[]]]]
call[name[self].setToolTip, parameter[call[name[profile].description, parameter[]]]]
|
keyword[def] identifier[setProfile] ( identifier[self] , identifier[profile] ):
literal[string]
identifier[self] . identifier[_profile] = identifier[profile]
identifier[self] . identifier[setIcon] ( identifier[profile] . identifier[icon] ())
identifier[self] . identifier[setText] ( identifier[profile] . identifier[name] ())
identifier[self] . identifier[setToolTip] ( identifier[profile] . identifier[description] ())
|
def setProfile(self, profile):
"""
Sets the profile linked with this action.
:param profile | <projexui.widgets.xviewwidget.XViewProfile>
"""
self._profile = profile # update the interface
self.setIcon(profile.icon())
self.setText(profile.name())
self.setToolTip(profile.description())
|
def thread_raise(thread, exctype):
'''
Raises or queues the exception `exctype` for the thread `thread`.
See the documentation on the function `thread_exception_gate()` for more
information.
Adapted from http://tomerfiliba.com/recipes/Thread2/ which explains:
"The exception will be raised only when executing python bytecode. If your
thread calls a native/built-in blocking function, the exception will be
raised only when execution returns to the python code."
Raises:
TypeError if `exctype` is not a class
ValueError, SystemError in case of unexpected problems
'''
import ctypes, inspect, threading, logging
if not inspect.isclass(exctype):
raise TypeError(
'cannot raise %s, only exception types can be raised (not '
'instances)' % exctype)
gate = thread_exception_gate(thread)
with gate.lock:
if gate.ok_to_raise.is_set() and thread.is_alive():
gate.ok_to_raise.clear()
logging.info('raising %s in thread %s', exctype, thread)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(thread.ident), ctypes.py_object(exctype))
if res == 0:
raise ValueError(
'invalid thread id? thread.ident=%s' % thread.ident)
elif res != 1:
# if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, 0)
raise SystemError('PyThreadState_SetAsyncExc failed')
else:
logging.info('queueing %s for thread %s', exctype, thread)
gate.queue_exception(exctype)
|
def function[thread_raise, parameter[thread, exctype]]:
constant[
Raises or queues the exception `exctype` for the thread `thread`.
See the documentation on the function `thread_exception_gate()` for more
information.
Adapted from http://tomerfiliba.com/recipes/Thread2/ which explains:
"The exception will be raised only when executing python bytecode. If your
thread calls a native/built-in blocking function, the exception will be
raised only when execution returns to the python code."
Raises:
TypeError if `exctype` is not a class
ValueError, SystemError in case of unexpected problems
]
import module[ctypes], module[inspect], module[threading], module[logging]
if <ast.UnaryOp object at 0x7da1b1e97100> begin[:]
<ast.Raise object at 0x7da1b1e97550>
variable[gate] assign[=] call[name[thread_exception_gate], parameter[name[thread]]]
with name[gate].lock begin[:]
if <ast.BoolOp object at 0x7da1b1e97880> begin[:]
call[name[gate].ok_to_raise.clear, parameter[]]
call[name[logging].info, parameter[constant[raising %s in thread %s], name[exctype], name[thread]]]
variable[res] assign[=] call[name[ctypes].pythonapi.PyThreadState_SetAsyncExc, parameter[call[name[ctypes].c_long, parameter[name[thread].ident]], call[name[ctypes].py_object, parameter[name[exctype]]]]]
if compare[name[res] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1e977c0>
|
keyword[def] identifier[thread_raise] ( identifier[thread] , identifier[exctype] ):
literal[string]
keyword[import] identifier[ctypes] , identifier[inspect] , identifier[threading] , identifier[logging]
keyword[if] keyword[not] identifier[inspect] . identifier[isclass] ( identifier[exctype] ):
keyword[raise] identifier[TypeError] (
literal[string]
literal[string] % identifier[exctype] )
identifier[gate] = identifier[thread_exception_gate] ( identifier[thread] )
keyword[with] identifier[gate] . identifier[lock] :
keyword[if] identifier[gate] . identifier[ok_to_raise] . identifier[is_set] () keyword[and] identifier[thread] . identifier[is_alive] ():
identifier[gate] . identifier[ok_to_raise] . identifier[clear] ()
identifier[logging] . identifier[info] ( literal[string] , identifier[exctype] , identifier[thread] )
identifier[res] = identifier[ctypes] . identifier[pythonapi] . identifier[PyThreadState_SetAsyncExc] (
identifier[ctypes] . identifier[c_long] ( identifier[thread] . identifier[ident] ), identifier[ctypes] . identifier[py_object] ( identifier[exctype] ))
keyword[if] identifier[res] == literal[int] :
keyword[raise] identifier[ValueError] (
literal[string] % identifier[thread] . identifier[ident] )
keyword[elif] identifier[res] != literal[int] :
identifier[ctypes] . identifier[pythonapi] . identifier[PyThreadState_SetAsyncExc] ( identifier[thread] . identifier[ident] , literal[int] )
keyword[raise] identifier[SystemError] ( literal[string] )
keyword[else] :
identifier[logging] . identifier[info] ( literal[string] , identifier[exctype] , identifier[thread] )
identifier[gate] . identifier[queue_exception] ( identifier[exctype] )
|
def thread_raise(thread, exctype):
"""
Raises or queues the exception `exctype` for the thread `thread`.
See the documentation on the function `thread_exception_gate()` for more
information.
Adapted from http://tomerfiliba.com/recipes/Thread2/ which explains:
"The exception will be raised only when executing python bytecode. If your
thread calls a native/built-in blocking function, the exception will be
raised only when execution returns to the python code."
Raises:
TypeError if `exctype` is not a class
ValueError, SystemError in case of unexpected problems
"""
import ctypes, inspect, threading, logging
if not inspect.isclass(exctype):
raise TypeError('cannot raise %s, only exception types can be raised (not instances)' % exctype) # depends on [control=['if'], data=[]]
gate = thread_exception_gate(thread)
with gate.lock:
if gate.ok_to_raise.is_set() and thread.is_alive():
gate.ok_to_raise.clear()
logging.info('raising %s in thread %s', exctype, thread)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), ctypes.py_object(exctype))
if res == 0:
raise ValueError('invalid thread id? thread.ident=%s' % thread.ident) # depends on [control=['if'], data=[]]
elif res != 1:
# if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, 0)
raise SystemError('PyThreadState_SetAsyncExc failed') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
logging.info('queueing %s for thread %s', exctype, thread)
gate.queue_exception(exctype) # depends on [control=['with'], data=[]]
|
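
A bare-bones demonstration of the CPython call the function wraps; the sleeping worker picks the exception up the next time it executes Python bytecode:

import ctypes
import threading
import time

def worker():
    try:
        while True:
            time.sleep(0.05)
    except RuntimeError:
        print('async exception delivered')

t = threading.Thread(target=worker)
t.start()
time.sleep(0.2)
ctypes.pythonapi.PyThreadState_SetAsyncExc(
    ctypes.c_long(t.ident), ctypes.py_object(RuntimeError))
t.join()
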
def handle_chld(self, sig, frame):
"""
SIGCHLD handling
:param sig:
:param frame:
:return:
"""
try:
while True:
wpid, status = waitpid(-1, WNOHANG)
if not wpid:
break
# self.stdout.write('%d,%d\n' % (wpid, status))
    except:
        # waitpid raises OSError (ECHILD) once there are no children left to reap
        pass
|
def function[handle_chld, parameter[self, sig, frame]]:
constant[
SIGCHLD handling
:param sig:
:param frame:
:return:
]
<ast.Try object at 0x7da18ede78b0>
|
keyword[def] identifier[handle_chld] ( identifier[self] , identifier[sig] , identifier[frame] ):
literal[string]
keyword[try] :
keyword[while] keyword[True] :
identifier[wpid] , identifier[status] = identifier[waitpid] (- literal[int] , identifier[WNOHANG] )
keyword[if] keyword[not] identifier[wpid] :
keyword[break]
keyword[except] :
keyword[pass]
|
def handle_chld(self, sig, frame):
"""
SIGCHLD handling
:param sig:
:param frame:
:return:
"""
try:
while True:
(wpid, status) = waitpid(-1, WNOHANG)
if not wpid:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
# self.stdout.write('%d,%d\n' % (wpid, status))
    except:
        # waitpid raises OSError (ECHILD) once there are no children left to reap
        pass # depends on [control=['except'], data=[]]
|
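
The same reaping loop written standalone, with the error case the bare except swallows made explicit (POSIX only):

import os

def reap_children():
    while True:
        try:
            wpid, status = os.waitpid(-1, os.WNOHANG)
        except ChildProcessError:  # no children left at all
            break
        if not wpid:               # children exist but none have exited
            break
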
def find_loops( record, index, stop_types = STOP_TYPES, open=None, seen = None ):
"""Find all loops within the index and replace with loop records"""
if open is None:
open = []
if seen is None:
seen = set()
for child in children( record, index, stop_types = stop_types ):
if child['type'] in stop_types or child['type'] == LOOP_TYPE:
continue
if child['address'] in open:
# loop has been found
start = open.index( child['address'] )
new = frozenset( open[start:] )
if new not in seen:
seen.add(new)
yield new
elif child['address'] in seen:
continue
else:
seen.add( child['address'])
open.append( child['address'] )
for loop in find_loops( child, index, stop_types=stop_types, open=open, seen=seen ):
yield loop
open.pop( -1 )
|
def function[find_loops, parameter[record, index, stop_types, open, seen]]:
    constant[Find all loops within the index, yielding each newly found loop as a frozenset of member addresses]
if compare[name[open] is constant[None]] begin[:]
variable[open] assign[=] list[[]]
if compare[name[seen] is constant[None]] begin[:]
variable[seen] assign[=] call[name[set], parameter[]]
for taget[name[child]] in starred[call[name[children], parameter[name[record], name[index]]]] begin[:]
if <ast.BoolOp object at 0x7da204344970> begin[:]
continue
if compare[call[name[child]][constant[address]] in name[open]] begin[:]
variable[start] assign[=] call[name[open].index, parameter[call[name[child]][constant[address]]]]
variable[new] assign[=] call[name[frozenset], parameter[call[name[open]][<ast.Slice object at 0x7da204345c60>]]]
if compare[name[new] <ast.NotIn object at 0x7da2590d7190> name[seen]] begin[:]
call[name[seen].add, parameter[name[new]]]
<ast.Yield object at 0x7da204344c10>
|
keyword[def] identifier[find_loops] ( identifier[record] , identifier[index] , identifier[stop_types] = identifier[STOP_TYPES] , identifier[open] = keyword[None] , identifier[seen] = keyword[None] ):
literal[string]
keyword[if] identifier[open] keyword[is] keyword[None] :
identifier[open] =[]
keyword[if] identifier[seen] keyword[is] keyword[None] :
identifier[seen] = identifier[set] ()
keyword[for] identifier[child] keyword[in] identifier[children] ( identifier[record] , identifier[index] , identifier[stop_types] = identifier[stop_types] ):
keyword[if] identifier[child] [ literal[string] ] keyword[in] identifier[stop_types] keyword[or] identifier[child] [ literal[string] ]== identifier[LOOP_TYPE] :
keyword[continue]
keyword[if] identifier[child] [ literal[string] ] keyword[in] identifier[open] :
identifier[start] = identifier[open] . identifier[index] ( identifier[child] [ literal[string] ])
identifier[new] = identifier[frozenset] ( identifier[open] [ identifier[start] :])
keyword[if] identifier[new] keyword[not] keyword[in] identifier[seen] :
identifier[seen] . identifier[add] ( identifier[new] )
keyword[yield] identifier[new]
keyword[elif] identifier[child] [ literal[string] ] keyword[in] identifier[seen] :
keyword[continue]
keyword[else] :
identifier[seen] . identifier[add] ( identifier[child] [ literal[string] ])
identifier[open] . identifier[append] ( identifier[child] [ literal[string] ])
keyword[for] identifier[loop] keyword[in] identifier[find_loops] ( identifier[child] , identifier[index] , identifier[stop_types] = identifier[stop_types] , identifier[open] = identifier[open] , identifier[seen] = identifier[seen] ):
keyword[yield] identifier[loop]
identifier[open] . identifier[pop] (- literal[int] )
|
def find_loops(record, index, stop_types=STOP_TYPES, open=None, seen=None):
"""Find all loops within the index and replace with loop records"""
if open is None:
open = [] # depends on [control=['if'], data=['open']]
if seen is None:
seen = set() # depends on [control=['if'], data=['seen']]
for child in children(record, index, stop_types=stop_types):
if child['type'] in stop_types or child['type'] == LOOP_TYPE:
continue # depends on [control=['if'], data=[]]
if child['address'] in open: # loop has been found
start = open.index(child['address'])
new = frozenset(open[start:])
if new not in seen:
seen.add(new)
yield new # depends on [control=['if'], data=['new', 'seen']] # depends on [control=['if'], data=['open']]
elif child['address'] in seen:
continue # depends on [control=['if'], data=[]]
else:
seen.add(child['address'])
open.append(child['address'])
for loop in find_loops(child, index, stop_types=stop_types, open=open, seen=seen):
yield loop # depends on [control=['for'], data=['loop']]
open.pop(-1) # depends on [control=['for'], data=['child']]
|
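
Driving find_loops over a three-node reference cycle, assuming the function from the row above is defined in the same module; the record/index shapes, the children helper and the LOOP_TYPE sentinel are hypothetical stand-ins for the module's own:

LOOP_TYPE = 'loop'  # hypothetical sentinel; the module defines its own

index = {
    1: {'address': 1, 'type': 'dict', 'refs': [2]},
    2: {'address': 2, 'type': 'dict', 'refs': [3]},
    3: {'address': 3, 'type': 'dict', 'refs': [1]},  # closes the cycle
}

def children(record, index, stop_types=()):
    # hypothetical: yield the records a given record refers to
    return [index[a] for a in record['refs']]

for loop in find_loops(index[1], index, stop_types=()):
    print(sorted(loop))  # [1, 2, 3]
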
def invoke(self, request):
# type: (ApiClientRequest) -> ApiClientResponse
"""Dispatches a request to an API endpoint described in the
request.
Resolves the method from input request object, converts the
list of header tuples to the required format (dict) for the
`requests` lib call and invokes the method with corresponding
parameters on `requests` library. The response from the call is
wrapped under the `ApiClientResponse` object and the
responsibility of translating a response code and response/
error lies with the caller.
:param request: Request to dispatch to the ApiClient
:type request: ApiClientRequest
:return: Response from the client call
:rtype: ApiClientResponse
:raises: :py:class:`ask_sdk_core.exceptions.ApiClientException`
"""
try:
http_method = self._resolve_method(request)
http_headers = self._convert_list_tuples_to_dict(
headers_list=request.headers)
parsed_url = parse_url(request.url)
if parsed_url.scheme is None or parsed_url.scheme != "https":
raise ApiClientException(
"Requests against non-HTTPS endpoints are not allowed.")
if request.body:
body_content_type = http_headers.get("Content-type", None)
if (body_content_type is not None and
"json" in body_content_type):
raw_data = json.dumps(request.body)
else:
raw_data = request.body
else:
raw_data = None
http_response = http_method(
url=request.url, headers=http_headers, data=raw_data)
return ApiClientResponse(
headers=self._convert_dict_to_list_tuples(
http_response.headers),
status_code=http_response.status_code,
body=http_response.text)
except Exception as e:
raise ApiClientException(
"Error executing the request: {}".format(str(e)))
|
def function[invoke, parameter[self, request]]:
constant[Dispatches a request to an API endpoint described in the
request.
Resolves the method from input request object, converts the
list of header tuples to the required format (dict) for the
`requests` lib call and invokes the method with corresponding
parameters on `requests` library. The response from the call is
wrapped under the `ApiClientResponse` object and the
responsibility of translating a response code and response/
error lies with the caller.
:param request: Request to dispatch to the ApiClient
:type request: ApiClientRequest
:return: Response from the client call
:rtype: ApiClientResponse
:raises: :py:class:`ask_sdk_core.exceptions.ApiClientException`
]
<ast.Try object at 0x7da1b19ef7c0>
|
keyword[def] identifier[invoke] ( identifier[self] , identifier[request] ):
literal[string]
keyword[try] :
identifier[http_method] = identifier[self] . identifier[_resolve_method] ( identifier[request] )
identifier[http_headers] = identifier[self] . identifier[_convert_list_tuples_to_dict] (
identifier[headers_list] = identifier[request] . identifier[headers] )
identifier[parsed_url] = identifier[parse_url] ( identifier[request] . identifier[url] )
keyword[if] identifier[parsed_url] . identifier[scheme] keyword[is] keyword[None] keyword[or] identifier[parsed_url] . identifier[scheme] != literal[string] :
keyword[raise] identifier[ApiClientException] (
literal[string] )
keyword[if] identifier[request] . identifier[body] :
identifier[body_content_type] = identifier[http_headers] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] ( identifier[body_content_type] keyword[is] keyword[not] keyword[None] keyword[and]
literal[string] keyword[in] identifier[body_content_type] ):
identifier[raw_data] = identifier[json] . identifier[dumps] ( identifier[request] . identifier[body] )
keyword[else] :
identifier[raw_data] = identifier[request] . identifier[body]
keyword[else] :
identifier[raw_data] = keyword[None]
identifier[http_response] = identifier[http_method] (
identifier[url] = identifier[request] . identifier[url] , identifier[headers] = identifier[http_headers] , identifier[data] = identifier[raw_data] )
keyword[return] identifier[ApiClientResponse] (
identifier[headers] = identifier[self] . identifier[_convert_dict_to_list_tuples] (
identifier[http_response] . identifier[headers] ),
identifier[status_code] = identifier[http_response] . identifier[status_code] ,
identifier[body] = identifier[http_response] . identifier[text] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[ApiClientException] (
literal[string] . identifier[format] ( identifier[str] ( identifier[e] )))
|
def invoke(self, request):
# type: (ApiClientRequest) -> ApiClientResponse
'Dispatches a request to an API endpoint described in the\n request.\n\n Resolves the method from input request object, converts the\n list of header tuples to the required format (dict) for the\n `requests` lib call and invokes the method with corresponding\n parameters on `requests` library. The response from the call is\n wrapped under the `ApiClientResponse` object and the\n responsibility of translating a response code and response/\n error lies with the caller.\n\n :param request: Request to dispatch to the ApiClient\n :type request: ApiClientRequest\n :return: Response from the client call\n :rtype: ApiClientResponse\n :raises: :py:class:`ask_sdk_core.exceptions.ApiClientException`\n '
try:
http_method = self._resolve_method(request)
http_headers = self._convert_list_tuples_to_dict(headers_list=request.headers)
parsed_url = parse_url(request.url)
if parsed_url.scheme is None or parsed_url.scheme != 'https':
raise ApiClientException('Requests against non-HTTPS endpoints are not allowed.') # depends on [control=['if'], data=[]]
if request.body:
body_content_type = http_headers.get('Content-type', None)
if body_content_type is not None and 'json' in body_content_type:
raw_data = json.dumps(request.body) # depends on [control=['if'], data=[]]
else:
raw_data = request.body # depends on [control=['if'], data=[]]
else:
raw_data = None
http_response = http_method(url=request.url, headers=http_headers, data=raw_data)
return ApiClientResponse(headers=self._convert_dict_to_list_tuples(http_response.headers), status_code=http_response.status_code, body=http_response.text) # depends on [control=['try'], data=[]]
except Exception as e:
raise ApiClientException('Error executing the request: {}'.format(str(e))) # depends on [control=['except'], data=['e']]
|
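The branch worth isolating is the body handling: bodies with a JSON Content-type get serialized, everything else passes through untouched. A hedged standalone sketch of just that decision (plain dicts, not the SDK itself):

import json

def prepare_body(body, headers):
    # serialize only when the Content-type header mentions json
    if not body:
        return None
    content_type = headers.get("Content-type")
    if content_type is not None and "json" in content_type:
        return json.dumps(body)
    return body

print(prepare_body({"a": 1}, {"Content-type": "application/json"}))  # '{"a": 1}'
print(prepare_body("raw-bytes", {"Content-type": "text/plain"}))     # 'raw-bytes'
|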
def get_by_identifier(self, identifier):
"""Gets blocks by identifier
Args:
identifier (str): Should be any of: username, phone_number, email.
See: https://auth0.com/docs/api/management/v2#!/User_Blocks/get_user_blocks
"""
params = {'identifier': identifier}
return self.client.get(self._url(), params=params)
|
def function[get_by_identifier, parameter[self, identifier]]:
constant[Gets blocks by identifier
Args:
identifier (str): Should be any of: username, phone_number, email.
See: https://auth0.com/docs/api/management/v2#!/User_Blocks/get_user_blocks
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b084fc10>], [<ast.Name object at 0x7da1b084f6d0>]]
return[call[name[self].client.get, parameter[call[name[self]._url, parameter[]]]]]
|
keyword[def] identifier[get_by_identifier] ( identifier[self] , identifier[identifier] ):
literal[string]
identifier[params] ={ literal[string] : identifier[identifier] }
keyword[return] identifier[self] . identifier[client] . identifier[get] ( identifier[self] . identifier[_url] (), identifier[params] = identifier[params] )
|
def get_by_identifier(self, identifier):
"""Gets blocks by identifier
Args:
identifier (str): Should be any of: username, phone_number, email.
See: https://auth0.com/docs/api/management/v2#!/User_Blocks/get_user_blocks
"""
params = {'identifier': identifier}
return self.client.get(self._url(), params=params)
|
def getIndent(indentNum):
"""
Cached indent getter function
"""
try:
return _indentCache[indentNum]
except KeyError:
i = "".join([_indent for _ in range(indentNum)])
_indentCache[indentNum] = i
return i
|
def function[getIndent, parameter[indentNum]]:
constant[
Cached indent getter function
]
<ast.Try object at 0x7da1b03fafb0>
|
keyword[def] identifier[getIndent] ( identifier[indentNum] ):
literal[string]
keyword[try] :
keyword[return] identifier[_indentCache] [ identifier[indentNum] ]
keyword[except] identifier[KeyError] :
identifier[i] = literal[string] . identifier[join] ([ identifier[_indent] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[indentNum] )])
identifier[_indentCache] [ identifier[indentNum] ]= identifier[i]
keyword[return] identifier[i]
|
def getIndent(indentNum):
"""
Cached indent getter function
"""
try:
return _indentCache[indentNum] # depends on [control=['try'], data=[]]
except KeyError:
i = ''.join([_indent for _ in range(indentNum)])
_indentCache[indentNum] = i
return i # depends on [control=['except'], data=[]]
|
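A runnable sketch of the memoization above, with the module-level `_indent` and `_indentCache` it assumes filled in (their real values are not shown here, so four spaces is an assumption):

_indent = "    "   # assumed width; the real module defines this elsewhere
_indentCache = {}

def getIndent(indentNum):
    try:
        return _indentCache[indentNum]
    except KeyError:
        i = "".join(_indent for _ in range(indentNum))
        _indentCache[indentNum] = i
        return i

print(repr(getIndent(2)))            # '        '
print(getIndent(2) is getIndent(2))  # True: the cached string object is reused
|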
def render(self, name, value, attrs=None, **kwargs):
"""Widget render method."""
if self.confirm_with:
self.attrs['data-confirm-with'] = 'id_%s' % self.confirm_with
confirmation_markup = """
<div style="margin-top: 10px;" class="hidden password_strength_info">
<p class="text-muted">
<span class="label label-danger">
%s
</span>
<span style="margin-left:5px;">%s</span>
</p>
</div>
""" % (_('Warning'), _("Your passwords don't match."))
try:
self.attrs['class'] = '%s password_confirmation'.strip() % self.attrs['class'] # noqa
except KeyError:
self.attrs['class'] = 'password_confirmation'
return mark_safe(super(PasswordConfirmationInput, self).render( # nosec
name, value, attrs) + confirmation_markup)
|
def function[render, parameter[self, name, value, attrs]]:
constant[Widget render method.]
if name[self].confirm_with begin[:]
call[name[self].attrs][constant[data-confirm-with]] assign[=] binary_operation[constant[id_%s] <ast.Mod object at 0x7da2590d6920> name[self].confirm_with]
variable[confirmation_markup] assign[=] binary_operation[constant[
<div style="margin-top: 10px;" class="hidden password_strength_info">
<p class="text-muted">
<span class="label label-danger">
%s
</span>
<span style="margin-left:5px;">%s</span>
</p>
</div>
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c6c69b0>, <ast.Call object at 0x7da20c6c6e00>]]]
<ast.Try object at 0x7da20c6c5150>
return[call[name[mark_safe], parameter[binary_operation[call[call[name[super], parameter[name[PasswordConfirmationInput], name[self]]].render, parameter[name[name], name[value], name[attrs]]] + name[confirmation_markup]]]]]
|
keyword[def] identifier[render] ( identifier[self] , identifier[name] , identifier[value] , identifier[attrs] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[confirm_with] :
identifier[self] . identifier[attrs] [ literal[string] ]= literal[string] % identifier[self] . identifier[confirm_with]
identifier[confirmation_markup] = literal[string] %( identifier[_] ( literal[string] ), identifier[_] ( literal[string] ))
keyword[try] :
identifier[self] . identifier[attrs] [ literal[string] ]= literal[string] . identifier[strip] ()% identifier[self] . identifier[attrs] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[self] . identifier[attrs] [ literal[string] ]= literal[string]
keyword[return] identifier[mark_safe] ( identifier[super] ( identifier[PasswordConfirmationInput] , identifier[self] ). identifier[render] (
identifier[name] , identifier[value] , identifier[attrs] )+ identifier[confirmation_markup] )
|
def render(self, name, value, attrs=None, **kwargs):
"""Widget render method."""
if self.confirm_with:
self.attrs['data-confirm-with'] = 'id_%s' % self.confirm_with # depends on [control=['if'], data=[]]
confirmation_markup = '\n <div style="margin-top: 10px;" class="hidden password_strength_info">\n <p class="text-muted">\n <span class="label label-danger">\n %s\n </span>\n <span style="margin-left:5px;">%s</span>\n </p>\n </div>\n ' % (_('Warning'), _("Your passwords don't match."))
try:
self.attrs['class'] = '%s password_confirmation'.strip() % self.attrs['class'] # noqa # depends on [control=['try'], data=[]]
except KeyError:
self.attrs['class'] = 'password_confirmation' # depends on [control=['except'], data=[]] # nosec
return mark_safe(super(PasswordConfirmationInput, self).render(name, value, attrs) + confirmation_markup)
|
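The try/except around the `class` attribute is simply "append a CSS class, creating the attribute if missing". A small standalone demo of that pattern (plain dicts, no Django required):

def add_css_class(attrs, new_class):
    try:
        attrs['class'] = '%s %s' % (attrs['class'], new_class)
    except KeyError:
        attrs['class'] = new_class
    return attrs

print(add_css_class({'class': 'form-control'}, 'password_confirmation'))
# {'class': 'form-control password_confirmation'}
print(add_css_class({}, 'password_confirmation'))
# {'class': 'password_confirmation'}
|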
def myRank(grade, badFormat, year, length):
'''rank of candidateNumber in year
Arguments:
grade {int} -- a weighted average for a specific candidate number and year
badFormat {dict} -- candNumber : [results for candidate]
year {int} -- year you are in
length {int} -- length of each row in badFormat divided by 2
Returns:
int -- rank of candidateNumber in year
'''
return int(sorted(everyonesAverage(year, badFormat, length), reverse=True).index(grade) + 1)
|
def function[myRank, parameter[grade, badFormat, year, length]]:
constant[rank of candidateNumber in year
Arguments:
grade {int} -- a weighted average for a specific candidate number and year
badFormat {dict} -- candNumber : [results for candidate]
year {int} -- year you are in
length {int} -- length of each row in badFormat divided by 2
Returns:
int -- rank of candidateNumber in year
]
return[call[name[int], parameter[binary_operation[call[call[name[sorted], parameter[call[name[everyonesAverage], parameter[name[year], name[badFormat], name[length]]]]].index, parameter[name[grade]]] + constant[1]]]]]
|
keyword[def] identifier[myRank] ( identifier[grade] , identifier[badFormat] , identifier[year] , identifier[length] ):
literal[string]
keyword[return] identifier[int] ( identifier[sorted] ( identifier[everyonesAverage] ( identifier[year] , identifier[badFormat] , identifier[length] ), identifier[reverse] = keyword[True] ). identifier[index] ( identifier[grade] )+ literal[int] )
|
def myRank(grade, badFormat, year, length):
"""rank of candidateNumber in year
Arguments:
grade {int} -- a weighted average for a specific candidate number and year
badFormat {dict} -- candNumber : [results for candidate]
year {int} -- year you are in
length {int} -- length of each row in badFormat divided by 2
Returns:
int -- rank of candidateNumber in year
"""
return int(sorted(everyonesAverage(year, badFormat, length), reverse=True).index(grade) + 1)
|
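The ranking itself is a one-line idiom: sort all grades descending and take the 1-based position of yours. A self-contained check with made-up grades standing in for `everyonesAverage`:

grades = [61.5, 72.0, 72.0, 68.3, 55.0]  # hypothetical year averages

def rank(grade, all_grades):
    # 1-based rank; tied grades share the rank of their first (best) position
    return sorted(all_grades, reverse=True).index(grade) + 1

print(rank(68.3, grades))  # 3
print(rank(72.0, grades))  # 1 (both 72.0 candidates rank 1st)
|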
def alphabetical_formula(self):
"""
Returns a reduced formula string with appended charge
"""
alph_formula = super().alphabetical_formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
elif self.charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
return alph_formula + chg_str
|
def function[alphabetical_formula, parameter[self]]:
constant[
Returns a reduced formula string with appended charge
]
variable[alph_formula] assign[=] call[name[super], parameter[]].alphabetical_formula
variable[chg_str] assign[=] constant[]
if compare[name[self].charge greater[>] constant[0]] begin[:]
variable[chg_str] assign[=] binary_operation[constant[ +] + call[name[formula_double_format], parameter[name[self].charge, constant[False]]]]
return[binary_operation[name[alph_formula] + name[chg_str]]]
|
keyword[def] identifier[alphabetical_formula] ( identifier[self] ):
literal[string]
identifier[alph_formula] = identifier[super] (). identifier[alphabetical_formula]
identifier[chg_str] = literal[string]
keyword[if] identifier[self] . identifier[charge] > literal[int] :
identifier[chg_str] = literal[string] + identifier[formula_double_format] ( identifier[self] . identifier[charge] , keyword[False] )
keyword[elif] identifier[self] . identifier[charge] < literal[int] :
identifier[chg_str] = literal[string] + identifier[formula_double_format] ( identifier[self] . identifier[charge] , keyword[False] )
keyword[return] identifier[alph_formula] + identifier[chg_str]
|
def alphabetical_formula(self):
"""
Returns a reduced formula string with appended charge
"""
alph_formula = super().alphabetical_formula
chg_str = ''
if self.charge > 0:
chg_str = ' +' + formula_double_format(self.charge, False) # depends on [control=['if'], data=[]]
elif self.charge < 0:
chg_str = ' ' + formula_double_format(self.charge, False) # depends on [control=['if'], data=[]]
return alph_formula + chg_str
|
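The charge-suffix logic is easy to verify in isolation. A sketch with a hypothetical stand-in for `formula_double_format`, which is defined elsewhere (this stub just drops the trailing .0 from integer-valued charges):

def formula_double_format(afloat, ignore_ones=True):
    # stand-in only, not the real helper
    return str(int(afloat)) if afloat == int(afloat) else str(afloat)

def charge_suffix(charge):
    if charge > 0:
        return " +" + formula_double_format(charge, False)
    elif charge < 0:
        return " " + formula_double_format(charge, False)
    return ""

print("Fe2 O3" + charge_suffix(2))   # 'Fe2 O3 +2'
print("S O4" + charge_suffix(-2))    # 'S O4 -2'
|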
def perplexity(eval_data, predictions, scores, learner='ignored'):
'''
Return the perplexity `exp(-score)` computed from each score in `scores`.
The log scores in `scores` should be base e (`exp`, `log`).
The correct average to use for this metric is the geometric mean. It is
    recommended to work in log space to calculate this mean (or use
`scipy.stats.mstats.gmean`):
mean_perplexity = np.exp(np.log(perplexities).mean())
>>> perplexities = perplexity(None, None, [np.log(0.5), np.log(0.1), np.log(0.25)])
>>> [round(p) for p in perplexities]
[2.0, 10.0, 4.0]
'''
return np.exp(-np.array(scores)).tolist()
|
def function[perplexity, parameter[eval_data, predictions, scores, learner]]:
constant[
Return the perplexity `exp(-score)` computed from each score in `scores`.
The log scores in `scores` should be base e (`exp`, `log`).
The correct average to use for this metric is the geometric mean. It is
    recommended to work in log space to calculate this mean (or use
`scipy.stats.mstats.gmean`):
mean_perplexity = np.exp(np.log(perplexities).mean())
>>> perplexities = perplexity(None, None, [np.log(0.5), np.log(0.1), np.log(0.25)])
>>> [round(p) for p in perplexities]
[2.0, 10.0, 4.0]
]
return[call[call[name[np].exp, parameter[<ast.UnaryOp object at 0x7da1b1038250>]].tolist, parameter[]]]
|
keyword[def] identifier[perplexity] ( identifier[eval_data] , identifier[predictions] , identifier[scores] , identifier[learner] = literal[string] ):
literal[string]
keyword[return] identifier[np] . identifier[exp] (- identifier[np] . identifier[array] ( identifier[scores] )). identifier[tolist] ()
|
def perplexity(eval_data, predictions, scores, learner='ignored'):
"""
Return the perplexity `exp(-score)` computed from each score in `scores`.
The log scores in `scores` should be base e (`exp`, `log`).
The correct average to use for this metric is the geometric mean. It is
    recommended to work in log space to calculate this mean (or use
`scipy.stats.mstats.gmean`):
mean_perplexity = np.exp(np.log(perplexities).mean())
>>> perplexities = perplexity(None, None, [np.log(0.5), np.log(0.1), np.log(0.25)])
>>> [round(p) for p in perplexities]
[2.0, 10.0, 4.0]
"""
return np.exp(-np.array(scores)).tolist()
|
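Following the docstring's advice, a quick runnable check that also computes the geometric-mean perplexity in log space:

import numpy as np

scores = [np.log(0.5), np.log(0.1), np.log(0.25)]
perplexities = np.exp(-np.array(scores))
print(perplexities.tolist())                # [2.0, 10.0, 4.0] (up to float error)
print(np.exp(np.log(perplexities).mean()))  # geometric mean, (2*10*4)**(1/3) ~= 4.31
|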
def write(self):
""" attempt to get a chunk of data to write to our child process's
    stdin, then write it. the return value answers the question "are we
done writing forever?" """
# get_chunk may sometimes return bytes, and sometimes return strings
# because of the nature of the different types of STDIN objects we
# support
try:
chunk = self.get_chunk()
if chunk is None:
raise DoneReadingForever
except DoneReadingForever:
self.log.debug("done reading")
if self.tty_in:
# EOF time
try:
char = termios.tcgetattr(self.stream)[6][termios.VEOF]
except:
char = chr(4).encode()
            # normally, one EOF should be enough to signal to a program
# that is read()ing, to return 0 and be on your way. however,
            # some programs misbehave, like python3.1 and python3.2.
# they don't stop reading sometimes after read() returns 0.
# this can be demonstrated with the following program:
#
# import sys
# sys.stdout.write(sys.stdin.read())
#
# then type 'a' followed by ctrl-d 3 times. in python
# 2.6,2.7,3.3,3.4,3.5,3.6, it only takes 2 ctrl-d to terminate.
# however, in python 3.1 and 3.2, it takes all 3.
#
            # so here we send an extra EOF along, just in case. I don't
# believe it can hurt anything
os.write(self.stream, char)
os.write(self.stream, char)
return True
except NotYetReadyToRead:
self.log.debug("received no data")
return False
# if we're not bytes, make us bytes
if IS_PY3 and hasattr(chunk, "encode"):
chunk = chunk.encode(self.encoding)
for proc_chunk in self.stream_bufferer.process(chunk):
self.log.debug("got chunk size %d: %r", len(proc_chunk),
proc_chunk[:30])
self.log.debug("writing chunk to process")
try:
os.write(self.stream, proc_chunk)
except OSError:
self.log.debug("OSError writing stdin chunk")
return True
|
def function[write, parameter[self]]:
constant[ attempt to get a chunk of data to write to our child process's
    stdin, then write it. the return value answers the question "are we
done writing forever?" ]
<ast.Try object at 0x7da18ede5c90>
if <ast.BoolOp object at 0x7da18ede6470> begin[:]
variable[chunk] assign[=] call[name[chunk].encode, parameter[name[self].encoding]]
for taget[name[proc_chunk]] in starred[call[name[self].stream_bufferer.process, parameter[name[chunk]]]] begin[:]
call[name[self].log.debug, parameter[constant[got chunk size %d: %r], call[name[len], parameter[name[proc_chunk]]], call[name[proc_chunk]][<ast.Slice object at 0x7da18ede65c0>]]]
call[name[self].log.debug, parameter[constant[writing chunk to process]]]
<ast.Try object at 0x7da18ede5f90>
|
keyword[def] identifier[write] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[chunk] = identifier[self] . identifier[get_chunk] ()
keyword[if] identifier[chunk] keyword[is] keyword[None] :
keyword[raise] identifier[DoneReadingForever]
keyword[except] identifier[DoneReadingForever] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
keyword[if] identifier[self] . identifier[tty_in] :
keyword[try] :
identifier[char] = identifier[termios] . identifier[tcgetattr] ( identifier[self] . identifier[stream] )[ literal[int] ][ identifier[termios] . identifier[VEOF] ]
keyword[except] :
identifier[char] = identifier[chr] ( literal[int] ). identifier[encode] ()
identifier[os] . identifier[write] ( identifier[self] . identifier[stream] , identifier[char] )
identifier[os] . identifier[write] ( identifier[self] . identifier[stream] , identifier[char] )
keyword[return] keyword[True]
keyword[except] identifier[NotYetReadyToRead] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[IS_PY3] keyword[and] identifier[hasattr] ( identifier[chunk] , literal[string] ):
identifier[chunk] = identifier[chunk] . identifier[encode] ( identifier[self] . identifier[encoding] )
keyword[for] identifier[proc_chunk] keyword[in] identifier[self] . identifier[stream_bufferer] . identifier[process] ( identifier[chunk] ):
identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[len] ( identifier[proc_chunk] ),
identifier[proc_chunk] [: literal[int] ])
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
keyword[try] :
identifier[os] . identifier[write] ( identifier[self] . identifier[stream] , identifier[proc_chunk] )
keyword[except] identifier[OSError] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
keyword[return] keyword[True]
|
def write(self):
""" attempt to get a chunk of data to write to our child process's
    stdin, then write it. the return value answers the question "are we
done writing forever?" """
# get_chunk may sometimes return bytes, and sometimes return strings
# because of the nature of the different types of STDIN objects we
# support
try:
chunk = self.get_chunk()
if chunk is None:
raise DoneReadingForever # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except DoneReadingForever:
self.log.debug('done reading')
if self.tty_in:
# EOF time
try:
char = termios.tcgetattr(self.stream)[6][termios.VEOF] # depends on [control=['try'], data=[]]
except:
char = chr(4).encode() # depends on [control=['except'], data=[]]
            # normally, one EOF should be enough to signal to a program
# that is read()ing, to return 0 and be on your way. however,
            # some programs misbehave, like python3.1 and python3.2.
# they don't stop reading sometimes after read() returns 0.
# this can be demonstrated with the following program:
#
# import sys
# sys.stdout.write(sys.stdin.read())
#
# then type 'a' followed by ctrl-d 3 times. in python
# 2.6,2.7,3.3,3.4,3.5,3.6, it only takes 2 ctrl-d to terminate.
# however, in python 3.1 and 3.2, it takes all 3.
#
            # so here we send an extra EOF along, just in case. I don't
# believe it can hurt anything
os.write(self.stream, char)
os.write(self.stream, char) # depends on [control=['if'], data=[]]
return True # depends on [control=['except'], data=[]]
except NotYetReadyToRead:
self.log.debug('received no data')
return False # depends on [control=['except'], data=[]]
# if we're not bytes, make us bytes
if IS_PY3 and hasattr(chunk, 'encode'):
chunk = chunk.encode(self.encoding) # depends on [control=['if'], data=[]]
for proc_chunk in self.stream_bufferer.process(chunk):
self.log.debug('got chunk size %d: %r', len(proc_chunk), proc_chunk[:30])
self.log.debug('writing chunk to process')
try:
os.write(self.stream, proc_chunk) # depends on [control=['try'], data=[]]
except OSError:
self.log.debug('OSError writing stdin chunk')
return True # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['proc_chunk']]
|
def delete(self, resource, force=False, export_only=None, suppress_device_updates=None, timeout=-1):
"""
Deletes a managed volume.
Args:
resource (dict):
Object to delete.
force:
If set to true, the operation completes despite any problems with
network connectivity or errors on the resource itself. The default is false.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
export_only:
            Valid prior to API500. By default, volumes will be deleted from both OneView and the storage system.
To delete the volume from OneView only, you must set its value to True.
Setting its value to False has the same behavior as the default behavior.
suppress_device_updates:
            Valid API500 onwards. By default, volumes will be deleted from both OneView and the storage system.
To delete the volume from OneView only, you must set its value to True.
Setting its value to False has the same behavior as the default behavior.
Returns:
bool: Indicates if the volume was successfully deleted.
"""
custom_headers = {'If-Match': '*'}
if 'uri' in resource:
uri = resource['uri']
else:
uri = self._client.build_uri(resource)
if suppress_device_updates:
uri += '?suppressDeviceUpdates=true'
if export_only:
custom_headers['exportOnly'] = True
return self._client.delete(uri, force=force, timeout=timeout, custom_headers=custom_headers)
|
def function[delete, parameter[self, resource, force, export_only, suppress_device_updates, timeout]]:
constant[
Deletes a managed volume.
Args:
resource (dict):
Object to delete.
force:
If set to true, the operation completes despite any problems with
network connectivity or errors on the resource itself. The default is false.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
export_only:
            Valid prior to API500. By default, volumes will be deleted from both OneView and the storage system.
To delete the volume from OneView only, you must set its value to True.
Setting its value to False has the same behavior as the default behavior.
suppress_device_updates:
            Valid API500 onwards. By default, volumes will be deleted from both OneView and the storage system.
To delete the volume from OneView only, you must set its value to True.
Setting its value to False has the same behavior as the default behavior.
Returns:
bool: Indicates if the volume was successfully deleted.
]
variable[custom_headers] assign[=] dictionary[[<ast.Constant object at 0x7da207f9ba60>], [<ast.Constant object at 0x7da207f9bfa0>]]
if compare[constant[uri] in name[resource]] begin[:]
variable[uri] assign[=] call[name[resource]][constant[uri]]
if name[suppress_device_updates] begin[:]
<ast.AugAssign object at 0x7da207f9a140>
if name[export_only] begin[:]
call[name[custom_headers]][constant[exportOnly]] assign[=] constant[True]
return[call[name[self]._client.delete, parameter[name[uri]]]]
|
keyword[def] identifier[delete] ( identifier[self] , identifier[resource] , identifier[force] = keyword[False] , identifier[export_only] = keyword[None] , identifier[suppress_device_updates] = keyword[None] , identifier[timeout] =- literal[int] ):
literal[string]
identifier[custom_headers] ={ literal[string] : literal[string] }
keyword[if] literal[string] keyword[in] identifier[resource] :
identifier[uri] = identifier[resource] [ literal[string] ]
keyword[else] :
identifier[uri] = identifier[self] . identifier[_client] . identifier[build_uri] ( identifier[resource] )
keyword[if] identifier[suppress_device_updates] :
identifier[uri] += literal[string]
keyword[if] identifier[export_only] :
identifier[custom_headers] [ literal[string] ]= keyword[True]
keyword[return] identifier[self] . identifier[_client] . identifier[delete] ( identifier[uri] , identifier[force] = identifier[force] , identifier[timeout] = identifier[timeout] , identifier[custom_headers] = identifier[custom_headers] )
|
def delete(self, resource, force=False, export_only=None, suppress_device_updates=None, timeout=-1):
"""
Deletes a managed volume.
Args:
resource (dict):
Object to delete.
force:
If set to true, the operation completes despite any problems with
network connectivity or errors on the resource itself. The default is false.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
export_only:
            Valid prior to API500. By default, volumes will be deleted from both OneView and the storage system.
To delete the volume from OneView only, you must set its value to True.
Setting its value to False has the same behavior as the default behavior.
suppress_device_updates:
            Valid API500 onwards. By default, volumes will be deleted from both OneView and the storage system.
To delete the volume from OneView only, you must set its value to True.
Setting its value to False has the same behavior as the default behavior.
Returns:
bool: Indicates if the volume was successfully deleted.
"""
custom_headers = {'If-Match': '*'}
if 'uri' in resource:
uri = resource['uri'] # depends on [control=['if'], data=['resource']]
else:
uri = self._client.build_uri(resource)
if suppress_device_updates:
uri += '?suppressDeviceUpdates=true' # depends on [control=['if'], data=[]]
if export_only:
custom_headers['exportOnly'] = True # depends on [control=['if'], data=[]]
return self._client.delete(uri, force=force, timeout=timeout, custom_headers=custom_headers)
|
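Stripped of the client plumbing, the URI handling reduces to: take the resource's `uri`, then append the query flag. A standalone sketch with a hypothetical URI (the real client can also build a URI from a bare id):

def build_delete_uri(resource, suppress_device_updates=False):
    uri = resource['uri']
    if suppress_device_updates:
        uri += '?suppressDeviceUpdates=true'
    return uri

print(build_delete_uri({'uri': '/rest/storage-volumes/abc'}, True))
# /rest/storage-volumes/abc?suppressDeviceUpdates=true
|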
def translate_points(self, points):
""" Translate coordinates and return screen coordinates
    Coordinates are returned as tuples, in the order passed.
:return: list
"""
retval = list()
append = retval.append
sx, sy = self.get_center_offset()
if self._zoom_level == 1.0:
for c in points:
append((c[0] + sx, c[1] + sy))
else:
rx = self._real_ratio_x
ry = self._real_ratio_y
for c in points:
append((int(round((c[0] + sx) * rx)), int(round((c[1] + sy) * ry))))
return retval
|
def function[translate_points, parameter[self, points]]:
constant[ Translate coordinates and return screen coordinates
    Coordinates are returned as tuples, in the order passed.
:return: list
]
variable[retval] assign[=] call[name[list], parameter[]]
variable[append] assign[=] name[retval].append
<ast.Tuple object at 0x7da1b0da3610> assign[=] call[name[self].get_center_offset, parameter[]]
if compare[name[self]._zoom_level equal[==] constant[1.0]] begin[:]
for taget[name[c]] in starred[name[points]] begin[:]
call[name[append], parameter[tuple[[<ast.BinOp object at 0x7da1b0da2d10>, <ast.BinOp object at 0x7da1b0da0400>]]]]
return[name[retval]]
|
keyword[def] identifier[translate_points] ( identifier[self] , identifier[points] ):
literal[string]
identifier[retval] = identifier[list] ()
identifier[append] = identifier[retval] . identifier[append]
identifier[sx] , identifier[sy] = identifier[self] . identifier[get_center_offset] ()
keyword[if] identifier[self] . identifier[_zoom_level] == literal[int] :
keyword[for] identifier[c] keyword[in] identifier[points] :
identifier[append] (( identifier[c] [ literal[int] ]+ identifier[sx] , identifier[c] [ literal[int] ]+ identifier[sy] ))
keyword[else] :
identifier[rx] = identifier[self] . identifier[_real_ratio_x]
identifier[ry] = identifier[self] . identifier[_real_ratio_y]
keyword[for] identifier[c] keyword[in] identifier[points] :
identifier[append] (( identifier[int] ( identifier[round] (( identifier[c] [ literal[int] ]+ identifier[sx] )* identifier[rx] )), identifier[int] ( identifier[round] (( identifier[c] [ literal[int] ]+ identifier[sy] )* identifier[ry] ))))
keyword[return] identifier[retval]
|
def translate_points(self, points):
""" Translate coordinates and return screen coordinates
    Coordinates are returned as tuples, in the order passed.
:return: list
"""
retval = list()
append = retval.append
(sx, sy) = self.get_center_offset()
if self._zoom_level == 1.0:
for c in points:
append((c[0] + sx, c[1] + sy)) # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=[]]
else:
rx = self._real_ratio_x
ry = self._real_ratio_y
for c in points:
append((int(round((c[0] + sx) * rx)), int(round((c[1] + sy) * ry)))) # depends on [control=['for'], data=['c']]
return retval
|
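The zoomed branch is plain affine math: offset first, then scale by the real-to-screen ratio and round. A standalone check with made-up offsets and ratios (the real method picks the branch from its zoom level):

def translate(points, sx, sy, rx=1.0, ry=1.0):
    if rx == 1.0 and ry == 1.0:
        return [(x + sx, y + sy) for x, y in points]
    return [(int(round((x + sx) * rx)), int(round((y + sy) * ry))) for x, y in points]

print(translate([(10, 20)], sx=5, sy=5))                  # [(15, 25)]
print(translate([(10, 20)], sx=5, sy=5, rx=0.5, ry=0.5))  # [(8, 12)]: halves round to even
|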
def from_file(cls, path, directory=None, modules=None, active=None):
"""
Instantiate a REPP from a `.rpp` file.
The *path* parameter points to the top-level module. Submodules
are loaded from *directory*. If *directory* is not given, it is
the directory part of *path*.
A REPP module may utilize external submodules, which may be
defined in two ways. The first method is to map a module name
to an instantiated REPP instance in *modules*. The second
method assumes that an external group call `>abc` corresponds
to a file `abc.rpp` in *directory* and loads that file. The
second method only happens if the name (e.g., `abc`) does not
appear in *modules*. Only one module may define a tokenization
pattern.
Args:
path (str): the path to the base REPP file to load
directory (str, optional): the directory in which to search
for submodules
modules (dict, optional): a mapping from identifiers to
REPP modules
active (iterable, optional): an iterable of default module
activations
"""
name = basename(path)
if name.endswith('.rpp'):
name = name[:-4]
lines = _repp_lines(path)
directory = dirname(path) if directory is None else directory
r = cls(name=name, modules=modules, active=active)
_parse_repp(lines, r, directory)
return r
|
def function[from_file, parameter[cls, path, directory, modules, active]]:
constant[
Instantiate a REPP from a `.rpp` file.
The *path* parameter points to the top-level module. Submodules
are loaded from *directory*. If *directory* is not given, it is
the directory part of *path*.
A REPP module may utilize external submodules, which may be
defined in two ways. The first method is to map a module name
to an instantiated REPP instance in *modules*. The second
method assumes that an external group call `>abc` corresponds
to a file `abc.rpp` in *directory* and loads that file. The
second method only happens if the name (e.g., `abc`) does not
appear in *modules*. Only one module may define a tokenization
pattern.
Args:
path (str): the path to the base REPP file to load
directory (str, optional): the directory in which to search
for submodules
modules (dict, optional): a mapping from identifiers to
REPP modules
active (iterable, optional): an iterable of default module
activations
]
variable[name] assign[=] call[name[basename], parameter[name[path]]]
if call[name[name].endswith, parameter[constant[.rpp]]] begin[:]
variable[name] assign[=] call[name[name]][<ast.Slice object at 0x7da1b04f4c40>]
variable[lines] assign[=] call[name[_repp_lines], parameter[name[path]]]
variable[directory] assign[=] <ast.IfExp object at 0x7da18f58c100>
variable[r] assign[=] call[name[cls], parameter[]]
call[name[_parse_repp], parameter[name[lines], name[r], name[directory]]]
return[name[r]]
|
keyword[def] identifier[from_file] ( identifier[cls] , identifier[path] , identifier[directory] = keyword[None] , identifier[modules] = keyword[None] , identifier[active] = keyword[None] ):
literal[string]
identifier[name] = identifier[basename] ( identifier[path] )
keyword[if] identifier[name] . identifier[endswith] ( literal[string] ):
identifier[name] = identifier[name] [:- literal[int] ]
identifier[lines] = identifier[_repp_lines] ( identifier[path] )
identifier[directory] = identifier[dirname] ( identifier[path] ) keyword[if] identifier[directory] keyword[is] keyword[None] keyword[else] identifier[directory]
identifier[r] = identifier[cls] ( identifier[name] = identifier[name] , identifier[modules] = identifier[modules] , identifier[active] = identifier[active] )
identifier[_parse_repp] ( identifier[lines] , identifier[r] , identifier[directory] )
keyword[return] identifier[r]
|
def from_file(cls, path, directory=None, modules=None, active=None):
"""
Instantiate a REPP from a `.rpp` file.
The *path* parameter points to the top-level module. Submodules
are loaded from *directory*. If *directory* is not given, it is
the directory part of *path*.
A REPP module may utilize external submodules, which may be
defined in two ways. The first method is to map a module name
to an instantiated REPP instance in *modules*. The second
method assumes that an external group call `>abc` corresponds
to a file `abc.rpp` in *directory* and loads that file. The
second method only happens if the name (e.g., `abc`) does not
appear in *modules*. Only one module may define a tokenization
pattern.
Args:
path (str): the path to the base REPP file to load
directory (str, optional): the directory in which to search
for submodules
modules (dict, optional): a mapping from identifiers to
REPP modules
active (iterable, optional): an iterable of default module
activations
"""
name = basename(path)
if name.endswith('.rpp'):
name = name[:-4] # depends on [control=['if'], data=[]]
lines = _repp_lines(path)
directory = dirname(path) if directory is None else directory
r = cls(name=name, modules=modules, active=active)
_parse_repp(lines, r, directory)
return r
|
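The path handling alone is worth seeing: the module name is the file's basename minus `.rpp`, and the submodule directory defaults to the file's own directory. A quick standalone check with a hypothetical path:

from os.path import basename, dirname

path = '/grammars/erg/tokenizer.rpp'   # hypothetical
name = basename(path)
if name.endswith('.rpp'):
    name = name[:-4]
directory = dirname(path)
print(name, directory)  # tokenizer /grammars/erg
|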
def get_request_id(self, renew=False):
"""
    :Brief: This method is used everywhere a request ID is needed: it returns the
    already generated request ID, or generates and stores a new one
"""
if not AppRequest.__request_id or renew:
self.set_request_id(uuid.uuid1())
return AppRequest.__request_id
|
def function[get_request_id, parameter[self, renew]]:
constant[
    :Brief: This method is used everywhere a request ID is needed: it returns the
    already generated request ID, or generates and stores a new one
]
if <ast.BoolOp object at 0x7da1b26aca30> begin[:]
call[name[self].set_request_id, parameter[call[name[uuid].uuid1, parameter[]]]]
return[name[AppRequest].__request_id]
|
keyword[def] identifier[get_request_id] ( identifier[self] , identifier[renew] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[AppRequest] . identifier[__request_id] keyword[or] identifier[renew] :
identifier[self] . identifier[set_request_id] ( identifier[uuid] . identifier[uuid1] ())
keyword[return] identifier[AppRequest] . identifier[__request_id]
|
def get_request_id(self, renew=False):
"""
    :Brief: This method is used everywhere a request ID is needed: it returns the
    already generated request ID, or generates and stores a new one
"""
if not AppRequest.__request_id or renew:
self.set_request_id(uuid.uuid1()) # depends on [control=['if'], data=[]]
return AppRequest.__request_id
|
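A minimal sketch of the lazily cached, renewable ID pattern (a hypothetical class, not the original `AppRequest`, which name-mangles its attribute and stores via `set_request_id`):

import uuid

class AppRequest:
    _request_id = None

    def get_request_id(self, renew=False):
        if not AppRequest._request_id or renew:
            AppRequest._request_id = str(uuid.uuid1())
        return AppRequest._request_id

r = AppRequest()
first = r.get_request_id()
print(first == r.get_request_id())             # True: cached
print(first == r.get_request_id(renew=True))   # False: regenerated
|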
def _EntriesGenerator(self):
"""Retrieves directory entries.
    Since a directory can contain a vast number of entries, using
a generator is more memory efficient.
Yields:
VShadowPathSpec: a path specification.
"""
location = getattr(self.path_spec, 'location', None)
store_index = getattr(self.path_spec, 'store_index', None)
# Only the virtual root file has directory entries.
if (store_index is None and location is not None and
location == self._file_system.LOCATION_ROOT):
vshadow_volume = self._file_system.GetVShadowVolume()
for store_index in range(0, vshadow_volume.number_of_stores):
yield vshadow_path_spec.VShadowPathSpec(
location='/vss{0:d}'.format(store_index + 1),
store_index=store_index, parent=self.path_spec.parent)
|
def function[_EntriesGenerator, parameter[self]]:
constant[Retrieves directory entries.
    Since a directory can contain a vast number of entries, using
a generator is more memory efficient.
Yields:
VShadowPathSpec: a path specification.
]
variable[location] assign[=] call[name[getattr], parameter[name[self].path_spec, constant[location], constant[None]]]
variable[store_index] assign[=] call[name[getattr], parameter[name[self].path_spec, constant[store_index], constant[None]]]
if <ast.BoolOp object at 0x7da1b07bac80> begin[:]
variable[vshadow_volume] assign[=] call[name[self]._file_system.GetVShadowVolume, parameter[]]
for taget[name[store_index]] in starred[call[name[range], parameter[constant[0], name[vshadow_volume].number_of_stores]]] begin[:]
<ast.Yield object at 0x7da1b07a9810>
|
keyword[def] identifier[_EntriesGenerator] ( identifier[self] ):
literal[string]
identifier[location] = identifier[getattr] ( identifier[self] . identifier[path_spec] , literal[string] , keyword[None] )
identifier[store_index] = identifier[getattr] ( identifier[self] . identifier[path_spec] , literal[string] , keyword[None] )
keyword[if] ( identifier[store_index] keyword[is] keyword[None] keyword[and] identifier[location] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[location] == identifier[self] . identifier[_file_system] . identifier[LOCATION_ROOT] ):
identifier[vshadow_volume] = identifier[self] . identifier[_file_system] . identifier[GetVShadowVolume] ()
keyword[for] identifier[store_index] keyword[in] identifier[range] ( literal[int] , identifier[vshadow_volume] . identifier[number_of_stores] ):
keyword[yield] identifier[vshadow_path_spec] . identifier[VShadowPathSpec] (
identifier[location] = literal[string] . identifier[format] ( identifier[store_index] + literal[int] ),
identifier[store_index] = identifier[store_index] , identifier[parent] = identifier[self] . identifier[path_spec] . identifier[parent] )
|
def _EntriesGenerator(self):
"""Retrieves directory entries.
    Since a directory can contain a vast number of entries, using
a generator is more memory efficient.
Yields:
VShadowPathSpec: a path specification.
"""
location = getattr(self.path_spec, 'location', None)
store_index = getattr(self.path_spec, 'store_index', None)
# Only the virtual root file has directory entries.
if store_index is None and location is not None and (location == self._file_system.LOCATION_ROOT):
vshadow_volume = self._file_system.GetVShadowVolume()
for store_index in range(0, vshadow_volume.number_of_stores):
yield vshadow_path_spec.VShadowPathSpec(location='/vss{0:d}'.format(store_index + 1), store_index=store_index, parent=self.path_spec.parent) # depends on [control=['for'], data=['store_index']] # depends on [control=['if'], data=[]]
|
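The yielded locations follow a 1-based /vss<N> naming scheme, one per shadow store. The naming alone, as a quick check with a hypothetical store count:

number_of_stores = 3  # hypothetical vshadow volume
locations = ['/vss{0:d}'.format(store_index + 1)
             for store_index in range(0, number_of_stores)]
print(locations)  # ['/vss1', '/vss2', '/vss3']
|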
def get_replies(self, new=True):
"""
Return all reply notifications for this user.
:param new: False for all notifications, True for only non-viewed
notifications.
"""
url = (self._imgur._base_url + "/3/account/{0}/"
"notifications/replies".format(self.name))
return self._imgur._send_request(url, needs_auth=True)
|
def function[get_replies, parameter[self, new]]:
constant[
Return all reply notifications for this user.
:param new: False for all notifications, True for only non-viewed
notifications.
]
variable[url] assign[=] binary_operation[name[self]._imgur._base_url + call[constant[/3/account/{0}/notifications/replies].format, parameter[name[self].name]]]
return[call[name[self]._imgur._send_request, parameter[name[url]]]]
|
keyword[def] identifier[get_replies] ( identifier[self] , identifier[new] = keyword[True] ):
literal[string]
identifier[url] =( identifier[self] . identifier[_imgur] . identifier[_base_url] + literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[name] ))
keyword[return] identifier[self] . identifier[_imgur] . identifier[_send_request] ( identifier[url] , identifier[needs_auth] = keyword[True] )
|
def get_replies(self, new=True):
"""
Return all reply notifications for this user.
:param new: False for all notifications, True for only non-viewed
notifications.
"""
url = self._imgur._base_url + '/3/account/{0}/notifications/replies'.format(self.name)
return self._imgur._send_request(url, needs_auth=True)
|
def run(expnum, ccd, version, dry_run=False, prefix="", force=False):
"""Run the OSSOS jmpmakepsf script.
"""
message = storage.SUCCESS
if storage.get_status(task, prefix, expnum, version=version, ccd=ccd) and not force:
logging.info("{} completed successfully for {} {} {} {}".format(task, prefix, expnum, version, ccd))
return
with storage.LoggingManager(task, prefix, expnum, ccd, version, dry_run):
try:
if not storage.get_status(dependency, prefix, expnum, "p", ccd=ccd):
raise IOError("{} not yet run for {}".format(dependency, expnum))
# confirm destination directory exists.
destdir = os.path.dirname(
storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='fits'))
if not dry_run:
storage.mkdir(destdir)
# get image from the vospace storage area
logging.info("Getting fits image from VOSpace")
filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
# get mopheader from the vospace storage area
logging.info("Getting mopheader from VOSpace")
mopheader_filename = storage.get_file(expnum, ccd, version=version, prefix=prefix, ext='mopheader')
# run mkpsf process
logging.info("Running mkpsf on %s %d" % (expnum, ccd))
logging.info(util.exec_prog(['jmpmakepsf.csh',
'./',
filename,
'yes', 'yes']))
if dry_run:
return
# place the results into VOSpace
basename = os.path.splitext(filename)[0]
for ext in ('mopheader', 'psf.fits',
'zeropoint.used', 'apcor', 'fwhm', 'phot'):
dest = storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext=ext)
source = basename + "." + str(ext)
count = 0
with open(source, 'r'):
while True:
count += 1
try:
logging.info("Attempt {} to copy {} -> {}".format(count, source, dest))
storage.copy(source, dest)
break
except Exception as ex:
if count > 10:
raise ex
# set some data parameters associated with the image, determined in this step.
storage.set_status('fwhm', prefix, expnum, version=version, ccd=ccd, status=str(storage.get_fwhm(
expnum, ccd=ccd, prefix=prefix, version=version)))
storage.set_status('zeropoint', prefix, expnum, version=version, ccd=ccd,
status=str(storage.get_zeropoint(
expnum, ccd=ccd, prefix=prefix, version=version)))
logging.info(message)
except Exception as e:
message = str(e)
logging.error(message)
storage.set_status(task, prefix, expnum, version, ccd=ccd, status=message)
return
|
def function[run, parameter[expnum, ccd, version, dry_run, prefix, force]]:
constant[Run the OSSOS jmpmakepsf script.
]
variable[message] assign[=] name[storage].SUCCESS
if <ast.BoolOp object at 0x7da1b1a4bd90> begin[:]
call[name[logging].info, parameter[call[constant[{} completed successfully for {} {} {} {}].format, parameter[name[task], name[prefix], name[expnum], name[version], name[ccd]]]]]
return[None]
with call[name[storage].LoggingManager, parameter[name[task], name[prefix], name[expnum], name[ccd], name[version], name[dry_run]]] begin[:]
<ast.Try object at 0x7da1b1b0cdc0>
call[name[storage].set_status, parameter[name[task], name[prefix], name[expnum], name[version]]]
return[None]
|
keyword[def] identifier[run] ( identifier[expnum] , identifier[ccd] , identifier[version] , identifier[dry_run] = keyword[False] , identifier[prefix] = literal[string] , identifier[force] = keyword[False] ):
literal[string]
identifier[message] = identifier[storage] . identifier[SUCCESS]
keyword[if] identifier[storage] . identifier[get_status] ( identifier[task] , identifier[prefix] , identifier[expnum] , identifier[version] = identifier[version] , identifier[ccd] = identifier[ccd] ) keyword[and] keyword[not] identifier[force] :
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[task] , identifier[prefix] , identifier[expnum] , identifier[version] , identifier[ccd] ))
keyword[return]
keyword[with] identifier[storage] . identifier[LoggingManager] ( identifier[task] , identifier[prefix] , identifier[expnum] , identifier[ccd] , identifier[version] , identifier[dry_run] ):
keyword[try] :
keyword[if] keyword[not] identifier[storage] . identifier[get_status] ( identifier[dependency] , identifier[prefix] , identifier[expnum] , literal[string] , identifier[ccd] = identifier[ccd] ):
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[dependency] , identifier[expnum] ))
identifier[destdir] = identifier[os] . identifier[path] . identifier[dirname] (
identifier[storage] . identifier[dbimages_uri] ( identifier[expnum] , identifier[ccd] , identifier[prefix] = identifier[prefix] , identifier[version] = identifier[version] , identifier[ext] = literal[string] ))
keyword[if] keyword[not] identifier[dry_run] :
identifier[storage] . identifier[mkdir] ( identifier[destdir] )
identifier[logging] . identifier[info] ( literal[string] )
identifier[filename] = identifier[storage] . identifier[get_image] ( identifier[expnum] , identifier[ccd] , identifier[version] = identifier[version] , identifier[prefix] = identifier[prefix] )
identifier[logging] . identifier[info] ( literal[string] )
identifier[mopheader_filename] = identifier[storage] . identifier[get_file] ( identifier[expnum] , identifier[ccd] , identifier[version] = identifier[version] , identifier[prefix] = identifier[prefix] , identifier[ext] = literal[string] )
identifier[logging] . identifier[info] ( literal[string] %( identifier[expnum] , identifier[ccd] ))
identifier[logging] . identifier[info] ( identifier[util] . identifier[exec_prog] ([ literal[string] ,
literal[string] ,
identifier[filename] ,
literal[string] , literal[string] ]))
keyword[if] identifier[dry_run] :
keyword[return]
identifier[basename] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[ literal[int] ]
keyword[for] identifier[ext] keyword[in] ( literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[dest] = identifier[storage] . identifier[dbimages_uri] ( identifier[expnum] , identifier[ccd] , identifier[prefix] = identifier[prefix] , identifier[version] = identifier[version] , identifier[ext] = identifier[ext] )
identifier[source] = identifier[basename] + literal[string] + identifier[str] ( identifier[ext] )
identifier[count] = literal[int]
keyword[with] identifier[open] ( identifier[source] , literal[string] ):
keyword[while] keyword[True] :
identifier[count] += literal[int]
keyword[try] :
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[count] , identifier[source] , identifier[dest] ))
identifier[storage] . identifier[copy] ( identifier[source] , identifier[dest] )
keyword[break]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[if] identifier[count] > literal[int] :
keyword[raise] identifier[ex]
identifier[storage] . identifier[set_status] ( literal[string] , identifier[prefix] , identifier[expnum] , identifier[version] = identifier[version] , identifier[ccd] = identifier[ccd] , identifier[status] = identifier[str] ( identifier[storage] . identifier[get_fwhm] (
identifier[expnum] , identifier[ccd] = identifier[ccd] , identifier[prefix] = identifier[prefix] , identifier[version] = identifier[version] )))
identifier[storage] . identifier[set_status] ( literal[string] , identifier[prefix] , identifier[expnum] , identifier[version] = identifier[version] , identifier[ccd] = identifier[ccd] ,
identifier[status] = identifier[str] ( identifier[storage] . identifier[get_zeropoint] (
identifier[expnum] , identifier[ccd] = identifier[ccd] , identifier[prefix] = identifier[prefix] , identifier[version] = identifier[version] )))
identifier[logging] . identifier[info] ( identifier[message] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[message] = identifier[str] ( identifier[e] )
identifier[logging] . identifier[error] ( identifier[message] )
identifier[storage] . identifier[set_status] ( identifier[task] , identifier[prefix] , identifier[expnum] , identifier[version] , identifier[ccd] = identifier[ccd] , identifier[status] = identifier[message] )
keyword[return]
|
def run(expnum, ccd, version, dry_run=False, prefix='', force=False):
"""Run the OSSOS jmpmakepsf script.
"""
message = storage.SUCCESS
if storage.get_status(task, prefix, expnum, version=version, ccd=ccd) and (not force):
logging.info('{} completed successfully for {} {} {} {}'.format(task, prefix, expnum, version, ccd))
return # depends on [control=['if'], data=[]]
with storage.LoggingManager(task, prefix, expnum, ccd, version, dry_run):
try:
if not storage.get_status(dependency, prefix, expnum, 'p', ccd=ccd):
raise IOError('{} not yet run for {}'.format(dependency, expnum)) # depends on [control=['if'], data=[]]
# confirm destination directory exists.
destdir = os.path.dirname(storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext='fits'))
if not dry_run:
storage.mkdir(destdir) # depends on [control=['if'], data=[]]
# get image from the vospace storage area
logging.info('Getting fits image from VOSpace')
filename = storage.get_image(expnum, ccd, version=version, prefix=prefix)
# get mopheader from the vospace storage area
logging.info('Getting mopheader from VOSpace')
mopheader_filename = storage.get_file(expnum, ccd, version=version, prefix=prefix, ext='mopheader')
# run mkpsf process
logging.info('Running mkpsf on %s %d' % (expnum, ccd))
logging.info(util.exec_prog(['jmpmakepsf.csh', './', filename, 'yes', 'yes']))
if dry_run:
return # depends on [control=['if'], data=[]]
# place the results into VOSpace
basename = os.path.splitext(filename)[0]
for ext in ('mopheader', 'psf.fits', 'zeropoint.used', 'apcor', 'fwhm', 'phot'):
dest = storage.dbimages_uri(expnum, ccd, prefix=prefix, version=version, ext=ext)
source = basename + '.' + str(ext)
count = 0
with open(source, 'r'):
while True:
count += 1
try:
logging.info('Attempt {} to copy {} -> {}'.format(count, source, dest))
storage.copy(source, dest)
break # depends on [control=['try'], data=[]]
except Exception as ex:
if count > 10:
raise ex # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ex']] # depends on [control=['while'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['for'], data=['ext']]
# set some data parameters associated with the image, determined in this step.
storage.set_status('fwhm', prefix, expnum, version=version, ccd=ccd, status=str(storage.get_fwhm(expnum, ccd=ccd, prefix=prefix, version=version)))
storage.set_status('zeropoint', prefix, expnum, version=version, ccd=ccd, status=str(storage.get_zeropoint(expnum, ccd=ccd, prefix=prefix, version=version)))
logging.info(message) # depends on [control=['try'], data=[]]
except Exception as e:
message = str(e)
logging.error(message) # depends on [control=['except'], data=['e']]
storage.set_status(task, prefix, expnum, version, ccd=ccd, status=message) # depends on [control=['with'], data=[]]
return
|
def __fix_context(context):
"""Return a new context dict based on original context.
The new context will be a copy of the original, and some mutable
members (such as script and css files) will also be copied to
prevent polluting shared context.
"""
COPY_LISTS = ('script_files', 'css_files',)
for attr in COPY_LISTS:
if attr in context:
context[attr] = context[attr][:]
return context
|
def function[__fix_context, parameter[context]]:
constant[Return a new context dict based on original context.
The new context will be a copy of the original, and some mutable
members (such as script and css files) will also be copied to
prevent polluting shared context.
]
variable[COPY_LISTS] assign[=] tuple[[<ast.Constant object at 0x7da1b033bfd0>, <ast.Constant object at 0x7da1b0338970>]]
for taget[name[attr]] in starred[name[COPY_LISTS]] begin[:]
if compare[name[attr] in name[context]] begin[:]
call[name[context]][name[attr]] assign[=] call[call[name[context]][name[attr]]][<ast.Slice object at 0x7da1b0339180>]
return[name[context]]
|
keyword[def] identifier[__fix_context] ( identifier[context] ):
literal[string]
identifier[COPY_LISTS] =( literal[string] , literal[string] ,)
keyword[for] identifier[attr] keyword[in] identifier[COPY_LISTS] :
keyword[if] identifier[attr] keyword[in] identifier[context] :
identifier[context] [ identifier[attr] ]= identifier[context] [ identifier[attr] ][:]
keyword[return] identifier[context]
|
def __fix_context(context):
"""Return a new context dict based on original context.
The new context will be a copy of the original, and some mutable
members (such as script and css files) will also be copied to
prevent polluting shared context.
"""
COPY_LISTS = ('script_files', 'css_files')
for attr in COPY_LISTS:
if attr in context:
context[attr] = context[attr][:] # depends on [control=['if'], data=['attr', 'context']] # depends on [control=['for'], data=['attr']]
return context
|
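Why copy the lists? Without the slice copy, a page appending to `script_files` would mutate the shared context. A runnable before/after demonstration:

shared = {'script_files': ['base.js']}

aliased = dict(shared)                  # shallow copy: the list is still shared
aliased['script_files'].append('page.js')
print(shared['script_files'])           # ['base.js', 'page.js'] -- polluted

shared = {'script_files': ['base.js']}
fixed = dict(shared)
fixed['script_files'] = fixed['script_files'][:]   # the __fix_context trick
fixed['script_files'].append('page.js')
print(shared['script_files'])           # ['base.js'] -- untouched
|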
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject)
for term in terms)
|
def function[_any_pandas_objects, parameter[terms]]:
constant[Check a sequence of terms for instances of PandasObject.]
return[call[name[any], parameter[<ast.GeneratorExp object at 0x7da18c4cfe80>]]]
|
keyword[def] identifier[_any_pandas_objects] ( identifier[terms] ):
literal[string]
keyword[return] identifier[any] ( identifier[isinstance] ( identifier[term] . identifier[value] , identifier[pd] . identifier[core] . identifier[generic] . identifier[PandasObject] )
keyword[for] identifier[term] keyword[in] identifier[terms] )
|
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any((isinstance(term.value, pd.core.generic.PandasObject) for term in terms))
|
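Any object exposing a .value attribute can serve as a term here. A hedged sketch with a namedtuple stand-in for the real Term type (which lives elsewhere in the module), assuming a pandas version where Series subclasses PandasObject:

from collections import namedtuple
import pandas as pd

Term = namedtuple('Term', 'value')                        # illustrative stand-in
print(_any_pandas_objects([Term(42)]))                    # False: plain int
print(_any_pandas_objects([Term(pd.Series([1, 2, 3]))]))  # True: Series is a PandasObject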
def display(self):
"""Display the visualization inline in the IPython notebook.
This is deprecated, use the following instead::
from IPython.display import display
display(viz)
"""
from IPython.core.display import display, HTML
display(HTML(self._repr_html_()))
|
def function[display, parameter[self]]:
constant[Display the visualization inline in the IPython notebook.
This is deprecated, use the following instead::
from IPython.display import display
display(viz)
]
from relative_module[IPython.core.display] import module[display], module[HTML]
call[name[display], parameter[call[name[HTML], parameter[call[name[self]._repr_html_, parameter[]]]]]]
|
keyword[def] identifier[display] ( identifier[self] ):
literal[string]
keyword[from] identifier[IPython] . identifier[core] . identifier[display] keyword[import] identifier[display] , identifier[HTML]
identifier[display] ( identifier[HTML] ( identifier[self] . identifier[_repr_html_] ()))
|
def display(self):
"""Display the visualization inline in the IPython notebook.
This is deprecated, use the following instead::
from IPython.display import display
display(viz)
"""
from IPython.core.display import display, HTML
display(HTML(self._repr_html_()))
|
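The deprecation note in the docstring amounts to this call pattern, where viz stands for any object that, like this one, implements _repr_html_:

from IPython.display import display

display(viz)   # IPython picks up viz._repr_html_() automatically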
def _run_svn(cmd, cwd, user, username, password, opts, **kwargs):
'''
Execute svn
return the output of the command
cmd
The command to run.
cwd
The path to the Subversion repository
user
Run svn as a user other than what the minion runs as
username
Connect to the Subversion server as another user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
opts
Any additional options to add to the command line
kwargs
Additional options to pass to the run-cmd
'''
cmd = ['svn', '--non-interactive', cmd]
options = list(opts)
if username:
options.extend(['--username', username])
if password:
options.extend(['--password', password])
cmd.extend(options)
result = __salt__['cmd.run_all'](cmd, python_shell=False, cwd=cwd, runas=user, **kwargs)
retcode = result['retcode']
if retcode == 0:
return result['stdout']
raise CommandExecutionError(result['stderr'] + '\n\n' + ' '.join(cmd))
|
def function[_run_svn, parameter[cmd, cwd, user, username, password, opts]]:
constant[
Execute svn
return the output of the command
cmd
The command to run.
cwd
The path to the Subversion repository
user
Run svn as a user other than what the minion runs as
username
Connect to the Subversion server as another user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
opts
Any additional options to add to the command line
kwargs
Additional options to pass to the run-cmd
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da20c6e4430>, <ast.Constant object at 0x7da20c6e79d0>, <ast.Name object at 0x7da20c6e7130>]]
variable[options] assign[=] call[name[list], parameter[name[opts]]]
if name[username] begin[:]
call[name[options].extend, parameter[list[[<ast.Constant object at 0x7da20c6e7be0>, <ast.Name object at 0x7da20c6e75b0>]]]]
if name[password] begin[:]
call[name[options].extend, parameter[list[[<ast.Constant object at 0x7da20c6e6950>, <ast.Name object at 0x7da20c6e7ca0>]]]]
call[name[cmd].extend, parameter[name[options]]]
variable[result] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
variable[retcode] assign[=] call[name[result]][constant[retcode]]
if compare[name[retcode] equal[==] constant[0]] begin[:]
return[call[name[result]][constant[stdout]]]
<ast.Raise object at 0x7da20c6e7fd0>
|
keyword[def] identifier[_run_svn] ( identifier[cmd] , identifier[cwd] , identifier[user] , identifier[username] , identifier[password] , identifier[opts] ,** identifier[kwargs] ):
literal[string]
identifier[cmd] =[ literal[string] , literal[string] , identifier[cmd] ]
identifier[options] = identifier[list] ( identifier[opts] )
keyword[if] identifier[username] :
identifier[options] . identifier[extend] ([ literal[string] , identifier[username] ])
keyword[if] identifier[password] :
identifier[options] . identifier[extend] ([ literal[string] , identifier[password] ])
identifier[cmd] . identifier[extend] ( identifier[options] )
identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] , identifier[cwd] = identifier[cwd] , identifier[runas] = identifier[user] ,** identifier[kwargs] )
identifier[retcode] = identifier[result] [ literal[string] ]
keyword[if] identifier[retcode] == literal[int] :
keyword[return] identifier[result] [ literal[string] ]
keyword[raise] identifier[CommandExecutionError] ( identifier[result] [ literal[string] ]+ literal[string] + literal[string] . identifier[join] ( identifier[cmd] ))
|
def _run_svn(cmd, cwd, user, username, password, opts, **kwargs):
"""
Execute svn
return the output of the command
cmd
The command to run.
cwd
The path to the Subversion repository
user
Run svn as a user other than what the minion runs as
username
Connect to the Subversion server as another user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
opts
Any additional options to add to the command line
kwargs
Additional options to pass to the run-cmd
"""
cmd = ['svn', '--non-interactive', cmd]
options = list(opts)
if username:
options.extend(['--username', username]) # depends on [control=['if'], data=[]]
if password:
options.extend(['--password', password]) # depends on [control=['if'], data=[]]
cmd.extend(options)
result = __salt__['cmd.run_all'](cmd, python_shell=False, cwd=cwd, runas=user, **kwargs)
retcode = result['retcode']
if retcode == 0:
return result['stdout'] # depends on [control=['if'], data=[]]
raise CommandExecutionError(result['stderr'] + '\n\n' + ' '.join(cmd))
|
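A hedged example of calling the helper from inside a Salt execution module, where the __salt__ dunder is injected by the loader; the argument values are illustrative:

# Builds and runs: svn --non-interactive info --xml --username deploy --password s3cret
out = _run_svn('info', cwd='/srv/checkout', user=None,
               username='deploy', password='s3cret', opts=['--xml'])
print(out)   # stdout of the command; CommandExecutionError is raised on failure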
def search_profiles(
self,
parent,
request_metadata,
profile_query=None,
page_size=None,
offset=None,
disable_spell_check=None,
order_by=None,
case_sensitive_sort=None,
histogram_queries=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Searches for profiles within a tenant.
For example, search by raw queries "software engineer in Mountain View"
or search by structured filters (location filter, education filter,
etc.).
See ``SearchProfilesRequest`` for more information.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ProfileServiceClient()
>>>
>>> parent = client.tenant_path('[PROJECT]', '[TENANT]')
>>>
>>> # TODO: Initialize `request_metadata`:
>>> request_metadata = {}
>>>
>>> # Iterate over all results
>>> for element in client.search_profiles(parent, request_metadata):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.search_profiles(parent, request_metadata).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required.
The resource name of the tenant to search within.
The format is "projects/{project\_id}/tenants/{tenant\_id}", for
example, "projects/api-test-project/tenants/foo".
request_metadata (Union[dict, ~google.cloud.talent_v4beta1.types.RequestMetadata]): Required.
The meta information collected about the profile search user. This is used
to improve the search quality of the service. These values are provided by
users, and must be precise and consistent.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.RequestMetadata`
profile_query (Union[dict, ~google.cloud.talent_v4beta1.types.ProfileQuery]): Optional.
Search query to execute. See ``ProfileQuery`` for more details.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.ProfileQuery`
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
offset (int): Optional.
An integer that specifies the current offset (that is, starting result)
in search results. This field is only considered if ``page_token`` is
unset.
The maximum allowed value is 5000. Otherwise an error is thrown.
For example, 0 means to search from the first profile, and 10 means to
search from the 11th profile. This can be used for pagination, for
example pageSize = 10 and offset = 10 means to search from the second
page.
disable_spell_check (bool): Optional.
This flag controls the spell-check feature. If ``false``, the service
attempts to correct a misspelled query.
For example, "enginee" is corrected to "engineer".
order_by (str): Optional.
            The criteria that determine how search results are sorted. Default is
"relevance desc" if no value is specified.
Supported options are:
- "relevance desc": By descending relevance, as determined by the API
algorithms.
- "update\_date desc": Sort by ``Profile.update_date`` in descending
order (recently updated profiles first).
- "create\_date desc": Sort by ``Profile.create_date`` in descending
order (recently created profiles first).
- "first\_name": Sort by ``PersonStrcuturedName.given_name`` in
ascending order.
- "first\_name desc": Sort by ``PersonStrcuturedName.given_name`` in
descending order.
- "last\_name": Sort by ``PersonStrcuturedName.family_name`` in
ascending order.
- "last\_name desc": Sort by ``PersonStrcuturedName.family_name`` in
ascending order.
case_sensitive_sort (bool): Optional.
When sort by field is based on alphabetical order, sort values case
sensitively (based on ASCII) when the value is set to true. Default value
            is case-insensitive sort (false).
histogram_queries (list[Union[dict, ~google.cloud.talent_v4beta1.types.HistogramQuery]]): Optional.
A list of expressions specifies histogram requests against matching
profiles for ``SearchProfilesRequest``.
The expression syntax looks like a function definition with optional
parameters.
Function syntax: function\_name(histogram\_facet[, list of buckets])
Data types:
- Histogram facet: facet names with format [a-zA-Z][a-zA-Z0-9\_]+.
- String: string like "any string with backslash escape for quote(")."
- Number: whole number and floating point number like 10, -1 and -0.01.
- List: list of elements with comma(,) separator surrounded by square
brackets. For example, [1, 2, 3] and ["one", "two", "three"].
Built-in constants:
- MIN (minimum number similar to java Double.MIN\_VALUE)
- MAX (maximum number similar to java Double.MAX\_VALUE)
Built-in functions:
            - bucket(start, end[, label]) Bucket built-in function creates a bucket
              with range of [start, end). Note that the end is exclusive. For
example, bucket(1, MAX, "positive number") or bucket(1, 10).
Histogram Facets:
- admin1: Admin1 is a global placeholder for referring to state,
province, or the particular term a country uses to define the
geographic structure below the country level. Examples include states
codes such as "CA", "IL", "NY", and provinces, such as "BC".
- locality: Locality is a global placeholder for referring to city,
town, or the particular term a country uses to define the geographic
structure below the admin1 level. Examples include city names such as
"Mountain View" and "New York".
- extended\_locality: Extended locality is concatenated version of
admin1 and locality with comma separator. For example, "Mountain
View, CA" and "New York, NY".
- postal\_code: Postal code of profile which follows locale code.
- country: Country code (ISO-3166-1 alpha-2 code) of profile, such as
US, JP, GB.
- job\_title: Normalized job titles specified in EmploymentHistory.
- company\_name: Normalized company name of profiles to match on.
- institution: The school name. For example, "MIT", "University of
California, Berkeley"
- degree: Highest education degree in ISCED code. Each value in degree
covers specific level of education, without any expansion to upper
nor lower levels of education degree.
- experience\_in\_months: experience in months. 0 means 0 month to 1
month (exclusive).
- application\_date: The application date specifies application start
              dates. See ``ApplicationDateFilter`` for more details.
- application\_outcome\_reason: The application outcome reason
specifies the outcome reasons of job application. See
``ApplicationOutcomeReasonFilter`` for more details.
- application\_last\_stage: The application last stage specifies the
last stage of job application. See ``ApplicationLastStageFilter`` for
more details.
- application\_job\_title: The application job title specifies the job
applied for in the application. See ``ApplicationJobFilter`` for more
details.
- application\_status: The application status specifies the status of
job application. See ``ApplicationStatusFilter`` for more details.
- hirable\_status: Hirable status specifies the profile's hirable
status.
- string\_custom\_attribute: String custom attributes. Values can be
accessed via square bracket notation like
string\_custom\_attribute["key1"].
- numeric\_custom\_attribute: Numeric custom attributes. Values can be
accessed via square bracket notation like
numeric\_custom\_attribute["key1"].
Example expressions:
- count(admin1)
- count(experience\_in\_months, [bucket(0, 12, "1 year"), bucket(12,
36, "1-3 years"), bucket(36, MAX, "3+ years")])
- count(string\_custom\_attribute["assigned\_recruiter"])
- count(numeric\_custom\_attribute["favorite\_number"], [bucket(MIN, 0,
"negative"), bucket(0, MAX, "non-negative")])
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.HistogramQuery`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.talent_v4beta1.types.HistogramQueryResult` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "search_profiles" not in self._inner_api_calls:
self._inner_api_calls[
"search_profiles"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.search_profiles,
default_retry=self._method_configs["SearchProfiles"].retry,
default_timeout=self._method_configs["SearchProfiles"].timeout,
client_info=self._client_info,
)
request = profile_service_pb2.SearchProfilesRequest(
parent=parent,
request_metadata=request_metadata,
profile_query=profile_query,
page_size=page_size,
offset=offset,
disable_spell_check=disable_spell_check,
order_by=order_by,
case_sensitive_sort=case_sensitive_sort,
histogram_queries=histogram_queries,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["search_profiles"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="histogram_query_results",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
|
def function[search_profiles, parameter[self, parent, request_metadata, profile_query, page_size, offset, disable_spell_check, order_by, case_sensitive_sort, histogram_queries, retry, timeout, metadata]]:
constant[
Searches for profiles within a tenant.
For example, search by raw queries "software engineer in Mountain View"
or search by structured filters (location filter, education filter,
etc.).
See ``SearchProfilesRequest`` for more information.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ProfileServiceClient()
>>>
>>> parent = client.tenant_path('[PROJECT]', '[TENANT]')
>>>
>>> # TODO: Initialize `request_metadata`:
>>> request_metadata = {}
>>>
>>> # Iterate over all results
>>> for element in client.search_profiles(parent, request_metadata):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.search_profiles(parent, request_metadata).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required.
The resource name of the tenant to search within.
The format is "projects/{project\_id}/tenants/{tenant\_id}", for
example, "projects/api-test-project/tenants/foo".
request_metadata (Union[dict, ~google.cloud.talent_v4beta1.types.RequestMetadata]): Required.
The meta information collected about the profile search user. This is used
to improve the search quality of the service. These values are provided by
users, and must be precise and consistent.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.RequestMetadata`
profile_query (Union[dict, ~google.cloud.talent_v4beta1.types.ProfileQuery]): Optional.
Search query to execute. See ``ProfileQuery`` for more details.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.ProfileQuery`
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
offset (int): Optional.
An integer that specifies the current offset (that is, starting result)
in search results. This field is only considered if ``page_token`` is
unset.
The maximum allowed value is 5000. Otherwise an error is thrown.
For example, 0 means to search from the first profile, and 10 means to
search from the 11th profile. This can be used for pagination, for
example pageSize = 10 and offset = 10 means to search from the second
page.
disable_spell_check (bool): Optional.
This flag controls the spell-check feature. If ``false``, the service
attempts to correct a misspelled query.
For example, "enginee" is corrected to "engineer".
order_by (str): Optional.
            The criteria that determine how search results are sorted. Default is
"relevance desc" if no value is specified.
Supported options are:
- "relevance desc": By descending relevance, as determined by the API
algorithms.
- "update\_date desc": Sort by ``Profile.update_date`` in descending
order (recently updated profiles first).
- "create\_date desc": Sort by ``Profile.create_date`` in descending
order (recently created profiles first).
- "first\_name": Sort by ``PersonStrcuturedName.given_name`` in
ascending order.
- "first\_name desc": Sort by ``PersonStrcuturedName.given_name`` in
descending order.
- "last\_name": Sort by ``PersonStrcuturedName.family_name`` in
ascending order.
- "last\_name desc": Sort by ``PersonStrcuturedName.family_name`` in
ascending order.
case_sensitive_sort (bool): Optional.
When sort by field is based on alphabetical order, sort values case
sensitively (based on ASCII) when the value is set to true. Default value
            is case-insensitive sort (false).
histogram_queries (list[Union[dict, ~google.cloud.talent_v4beta1.types.HistogramQuery]]): Optional.
A list of expressions specifies histogram requests against matching
profiles for ``SearchProfilesRequest``.
The expression syntax looks like a function definition with optional
parameters.
Function syntax: function\_name(histogram\_facet[, list of buckets])
Data types:
- Histogram facet: facet names with format [a-zA-Z][a-zA-Z0-9\_]+.
- String: string like "any string with backslash escape for quote(")."
- Number: whole number and floating point number like 10, -1 and -0.01.
- List: list of elements with comma(,) separator surrounded by square
brackets. For example, [1, 2, 3] and ["one", "two", "three"].
Built-in constants:
- MIN (minimum number similar to java Double.MIN\_VALUE)
- MAX (maximum number similar to java Double.MAX\_VALUE)
Built-in functions:
            - bucket(start, end[, label]) Bucket built-in function creates a bucket
              with range of [start, end). Note that the end is exclusive. For
example, bucket(1, MAX, "positive number") or bucket(1, 10).
Histogram Facets:
- admin1: Admin1 is a global placeholder for referring to state,
province, or the particular term a country uses to define the
geographic structure below the country level. Examples include states
codes such as "CA", "IL", "NY", and provinces, such as "BC".
- locality: Locality is a global placeholder for referring to city,
town, or the particular term a country uses to define the geographic
structure below the admin1 level. Examples include city names such as
"Mountain View" and "New York".
- extended\_locality: Extended locality is concatenated version of
admin1 and locality with comma separator. For example, "Mountain
View, CA" and "New York, NY".
- postal\_code: Postal code of profile which follows locale code.
- country: Country code (ISO-3166-1 alpha-2 code) of profile, such as
US, JP, GB.
- job\_title: Normalized job titles specified in EmploymentHistory.
- company\_name: Normalized company name of profiles to match on.
- institution: The school name. For example, "MIT", "University of
California, Berkeley"
- degree: Highest education degree in ISCED code. Each value in degree
covers specific level of education, without any expansion to upper
nor lower levels of education degree.
- experience\_in\_months: experience in months. 0 means 0 month to 1
month (exclusive).
- application\_date: The application date specifies application start
              dates. See ``ApplicationDateFilter`` for more details.
- application\_outcome\_reason: The application outcome reason
specifies the outcome reasons of job application. See
``ApplicationOutcomeReasonFilter`` for more details.
- application\_last\_stage: The application last stage specifies the
last stage of job application. See ``ApplicationLastStageFilter`` for
more details.
- application\_job\_title: The application job title specifies the job
applied for in the application. See ``ApplicationJobFilter`` for more
details.
- application\_status: The application status specifies the status of
job application. See ``ApplicationStatusFilter`` for more details.
- hirable\_status: Hirable status specifies the profile's hirable
status.
- string\_custom\_attribute: String custom attributes. Values can be
accessed via square bracket notation like
string\_custom\_attribute["key1"].
- numeric\_custom\_attribute: Numeric custom attributes. Values can be
accessed via square bracket notation like
numeric\_custom\_attribute["key1"].
Example expressions:
- count(admin1)
- count(experience\_in\_months, [bucket(0, 12, "1 year"), bucket(12,
36, "1-3 years"), bucket(36, MAX, "3+ years")])
- count(string\_custom\_attribute["assigned\_recruiter"])
- count(numeric\_custom\_attribute["favorite\_number"], [bucket(MIN, 0,
"negative"), bucket(0, MAX, "non-negative")])
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.HistogramQuery`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.talent_v4beta1.types.HistogramQueryResult` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[search_profiles] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[search_profiles]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.search_profiles]]
variable[request] assign[=] call[name[profile_service_pb2].SearchProfilesRequest, parameter[]]
variable[iterator] assign[=] call[name[google].api_core.page_iterator.GRPCIterator, parameter[]]
return[name[iterator]]
|
keyword[def] identifier[search_profiles] (
identifier[self] ,
identifier[parent] ,
identifier[request_metadata] ,
identifier[profile_query] = keyword[None] ,
identifier[page_size] = keyword[None] ,
identifier[offset] = keyword[None] ,
identifier[disable_spell_check] = keyword[None] ,
identifier[order_by] = keyword[None] ,
identifier[case_sensitive_sort] = keyword[None] ,
identifier[histogram_queries] = keyword[None] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[search_profiles] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[profile_service_pb2] . identifier[SearchProfilesRequest] (
identifier[parent] = identifier[parent] ,
identifier[request_metadata] = identifier[request_metadata] ,
identifier[profile_query] = identifier[profile_query] ,
identifier[page_size] = identifier[page_size] ,
identifier[offset] = identifier[offset] ,
identifier[disable_spell_check] = identifier[disable_spell_check] ,
identifier[order_by] = identifier[order_by] ,
identifier[case_sensitive_sort] = identifier[case_sensitive_sort] ,
identifier[histogram_queries] = identifier[histogram_queries] ,
)
identifier[iterator] = identifier[google] . identifier[api_core] . identifier[page_iterator] . identifier[GRPCIterator] (
identifier[client] = keyword[None] ,
identifier[method] = identifier[functools] . identifier[partial] (
identifier[self] . identifier[_inner_api_calls] [ literal[string] ],
identifier[retry] = identifier[retry] ,
identifier[timeout] = identifier[timeout] ,
identifier[metadata] = identifier[metadata] ,
),
identifier[request] = identifier[request] ,
identifier[items_field] = literal[string] ,
identifier[request_token_field] = literal[string] ,
identifier[response_token_field] = literal[string] ,
)
keyword[return] identifier[iterator]
|
def search_profiles(self, parent, request_metadata, profile_query=None, page_size=None, offset=None, disable_spell_check=None, order_by=None, case_sensitive_sort=None, histogram_queries=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Searches for profiles within a tenant.
For example, search by raw queries "software engineer in Mountain View"
or search by structured filters (location filter, education filter,
etc.).
See ``SearchProfilesRequest`` for more information.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ProfileServiceClient()
>>>
>>> parent = client.tenant_path('[PROJECT]', '[TENANT]')
>>>
>>> # TODO: Initialize `request_metadata`:
>>> request_metadata = {}
>>>
>>> # Iterate over all results
>>> for element in client.search_profiles(parent, request_metadata):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.search_profiles(parent, request_metadata).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required.
The resource name of the tenant to search within.
The format is "projects/{project\\_id}/tenants/{tenant\\_id}", for
example, "projects/api-test-project/tenants/foo".
request_metadata (Union[dict, ~google.cloud.talent_v4beta1.types.RequestMetadata]): Required.
The meta information collected about the profile search user. This is used
to improve the search quality of the service. These values are provided by
users, and must be precise and consistent.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.RequestMetadata`
profile_query (Union[dict, ~google.cloud.talent_v4beta1.types.ProfileQuery]): Optional.
Search query to execute. See ``ProfileQuery`` for more details.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.ProfileQuery`
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
offset (int): Optional.
An integer that specifies the current offset (that is, starting result)
in search results. This field is only considered if ``page_token`` is
unset.
The maximum allowed value is 5000. Otherwise an error is thrown.
For example, 0 means to search from the first profile, and 10 means to
search from the 11th profile. This can be used for pagination, for
example pageSize = 10 and offset = 10 means to search from the second
page.
disable_spell_check (bool): Optional.
This flag controls the spell-check feature. If ``false``, the service
attempts to correct a misspelled query.
For example, "enginee" is corrected to "engineer".
order_by (str): Optional.
            The criteria that determine how search results are sorted. Default is
"relevance desc" if no value is specified.
Supported options are:
- "relevance desc": By descending relevance, as determined by the API
algorithms.
- "update\\_date desc": Sort by ``Profile.update_date`` in descending
order (recently updated profiles first).
- "create\\_date desc": Sort by ``Profile.create_date`` in descending
order (recently created profiles first).
- "first\\_name": Sort by ``PersonStrcuturedName.given_name`` in
ascending order.
- "first\\_name desc": Sort by ``PersonStrcuturedName.given_name`` in
descending order.
- "last\\_name": Sort by ``PersonStrcuturedName.family_name`` in
ascending order.
- "last\\_name desc": Sort by ``PersonStrcuturedName.family_name`` in
ascending order.
case_sensitive_sort (bool): Optional.
When sort by field is based on alphabetical order, sort values case
sensitively (based on ASCII) when the value is set to true. Default value
            is case-insensitive sort (false).
histogram_queries (list[Union[dict, ~google.cloud.talent_v4beta1.types.HistogramQuery]]): Optional.
A list of expressions specifies histogram requests against matching
profiles for ``SearchProfilesRequest``.
The expression syntax looks like a function definition with optional
parameters.
Function syntax: function\\_name(histogram\\_facet[, list of buckets])
Data types:
- Histogram facet: facet names with format [a-zA-Z][a-zA-Z0-9\\_]+.
- String: string like "any string with backslash escape for quote(")."
- Number: whole number and floating point number like 10, -1 and -0.01.
- List: list of elements with comma(,) separator surrounded by square
brackets. For example, [1, 2, 3] and ["one", "two", "three"].
Built-in constants:
- MIN (minimum number similar to java Double.MIN\\_VALUE)
- MAX (maximum number similar to java Double.MAX\\_VALUE)
Built-in functions:
            - bucket(start, end[, label]) Bucket built-in function creates a bucket
              with range of [start, end). Note that the end is exclusive. For
example, bucket(1, MAX, "positive number") or bucket(1, 10).
Histogram Facets:
- admin1: Admin1 is a global placeholder for referring to state,
province, or the particular term a country uses to define the
geographic structure below the country level. Examples include states
codes such as "CA", "IL", "NY", and provinces, such as "BC".
- locality: Locality is a global placeholder for referring to city,
town, or the particular term a country uses to define the geographic
structure below the admin1 level. Examples include city names such as
"Mountain View" and "New York".
- extended\\_locality: Extended locality is concatenated version of
admin1 and locality with comma separator. For example, "Mountain
View, CA" and "New York, NY".
- postal\\_code: Postal code of profile which follows locale code.
- country: Country code (ISO-3166-1 alpha-2 code) of profile, such as
US, JP, GB.
- job\\_title: Normalized job titles specified in EmploymentHistory.
- company\\_name: Normalized company name of profiles to match on.
- institution: The school name. For example, "MIT", "University of
California, Berkeley"
- degree: Highest education degree in ISCED code. Each value in degree
covers specific level of education, without any expansion to upper
nor lower levels of education degree.
- experience\\_in\\_months: experience in months. 0 means 0 month to 1
month (exclusive).
- application\\_date: The application date specifies application start
              dates. See ``ApplicationDateFilter`` for more details.
- application\\_outcome\\_reason: The application outcome reason
specifies the outcome reasons of job application. See
``ApplicationOutcomeReasonFilter`` for more details.
- application\\_last\\_stage: The application last stage specifies the
last stage of job application. See ``ApplicationLastStageFilter`` for
more details.
- application\\_job\\_title: The application job title specifies the job
applied for in the application. See ``ApplicationJobFilter`` for more
details.
- application\\_status: The application status specifies the status of
job application. See ``ApplicationStatusFilter`` for more details.
- hirable\\_status: Hirable status specifies the profile's hirable
status.
- string\\_custom\\_attribute: String custom attributes. Values can be
accessed via square bracket notation like
string\\_custom\\_attribute["key1"].
- numeric\\_custom\\_attribute: Numeric custom attributes. Values can be
accessed via square bracket notation like
numeric\\_custom\\_attribute["key1"].
Example expressions:
- count(admin1)
- count(experience\\_in\\_months, [bucket(0, 12, "1 year"), bucket(12,
36, "1-3 years"), bucket(36, MAX, "3+ years")])
- count(string\\_custom\\_attribute["assigned\\_recruiter"])
- count(numeric\\_custom\\_attribute["favorite\\_number"], [bucket(MIN, 0,
"negative"), bucket(0, MAX, "non-negative")])
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.HistogramQuery`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.talent_v4beta1.types.HistogramQueryResult` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'search_profiles' not in self._inner_api_calls:
self._inner_api_calls['search_profiles'] = google.api_core.gapic_v1.method.wrap_method(self.transport.search_profiles, default_retry=self._method_configs['SearchProfiles'].retry, default_timeout=self._method_configs['SearchProfiles'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = profile_service_pb2.SearchProfilesRequest(parent=parent, request_metadata=request_metadata, profile_query=profile_query, page_size=page_size, offset=offset, disable_spell_check=disable_spell_check, order_by=order_by, case_sensitive_sort=case_sensitive_sort, histogram_queries=histogram_queries)
iterator = google.api_core.page_iterator.GRPCIterator(client=None, method=functools.partial(self._inner_api_calls['search_profiles'], retry=retry, timeout=timeout, metadata=metadata), request=request, items_field='histogram_query_results', request_token_field='page_token', response_token_field='next_page_token')
return iterator
|
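Beyond the doctest in the docstring, the query and histogram arguments can be passed as plain dicts; a hedged sketch (field names follow the v4beta1 protos, the values are illustrative):

from google.cloud import talent_v4beta1

client = talent_v4beta1.ProfileServiceClient()
parent = client.tenant_path('[PROJECT]', '[TENANT]')
request_metadata = {'domain': 'example.com', 'session_id': '1', 'user_id': 'u1'}

results = client.search_profiles(
    parent,
    request_metadata,
    profile_query={'query': 'software engineer'},
    histogram_queries=[{'histogram_query': 'count(admin1)'}],
    page_size=10)
for histogram_result in results:   # iterates HistogramQueryResult instances
    print(histogram_result)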
def Get(self):
"""Fetches user's data and returns it wrapped in a Grruser object."""
args = user_management_pb2.ApiGetGrrUserArgs(username=self.username)
data = self._context.SendRequest("GetGrrUser", args)
return GrrUser(data=data, context=self._context)
|
def function[Get, parameter[self]]:
    constant[Fetches user's data and returns it wrapped in a GrrUser object.]
variable[args] assign[=] call[name[user_management_pb2].ApiGetGrrUserArgs, parameter[]]
variable[data] assign[=] call[name[self]._context.SendRequest, parameter[constant[GetGrrUser], name[args]]]
return[call[name[GrrUser], parameter[]]]
|
keyword[def] identifier[Get] ( identifier[self] ):
literal[string]
identifier[args] = identifier[user_management_pb2] . identifier[ApiGetGrrUserArgs] ( identifier[username] = identifier[self] . identifier[username] )
identifier[data] = identifier[self] . identifier[_context] . identifier[SendRequest] ( literal[string] , identifier[args] )
keyword[return] identifier[GrrUser] ( identifier[data] = identifier[data] , identifier[context] = identifier[self] . identifier[_context] )
|
def Get(self):
"""Fetches user's data and returns it wrapped in a Grruser object."""
args = user_management_pb2.ApiGetGrrUserArgs(username=self.username)
data = self._context.SendRequest('GetGrrUser', args)
return GrrUser(data=data, context=self._context)
|
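A short usage sketch; how the reference object is obtained depends on the GRR API client wiring, so user_ref below is assumed to be an instance of this class:

user = user_ref.Get()          # issues the GetGrrUser API call
print(user.data.username)      # ApiGrrUser proto fields are reachable via .data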
def pivot_query_as_matrix(facet=None, facet_pivot_fields=None, **kwargs):
"""
    Perform a facet pivot query and return the counts as a matrix with axis labels.
"""
if facet_pivot_fields is None:
facet_pivot_fields = []
logging.info("Additional args: {}".format(kwargs))
fp = search_associations(rows=0,
facet_fields=[facet],
facet_pivot_fields=facet_pivot_fields,
**kwargs)['facet_pivot']
# we assume only one
results = list(fp.items())[0][1]
tups = []
    xtype = None
    ytype = None
    xlabels = set()
    ylabels = set()
    for r in results:
        logging.info("R={}".format(r))
        xtype = r['field']
        rv = r['value']
        xlabels.add(rv)
        for piv in r['pivot']:
            ytype = piv['field']
            pv = piv['value']
            ylabels.add(pv)
            tups.append((rv, pv, piv['count']))
    z = [[0] * len(xlabels) for i1 in range(len(ylabels))]
    xlabels = list(xlabels)
    ylabels = list(ylabels)
    xmap = dict([x[::-1] for x in enumerate(xlabels)])
    ymap = dict([x[::-1] for x in enumerate(ylabels)])
    for t in tups:
        z[ymap[t[1]]][xmap[t[0]]] = t[2]
    m = {'xtype': xtype,
         'ytype': ytype,
         'xaxis': xlabels,
         'yaxis': ylabels,
         'z': z}
return m
|
def function[pivot_query_as_matrix, parameter[facet, facet_pivot_fields]]:
constant[
    Perform a facet pivot query and return the counts as a matrix with axis labels.
]
if compare[name[facet_pivot_fields] is constant[None]] begin[:]
variable[facet_pivot_fields] assign[=] list[[]]
call[name[logging].info, parameter[call[constant[Additional args: {}].format, parameter[name[kwargs]]]]]
variable[fp] assign[=] call[call[name[search_associations], parameter[]]][constant[facet_pivot]]
variable[results] assign[=] call[call[call[name[list], parameter[call[name[fp].items, parameter[]]]]][constant[0]]][constant[1]]
variable[tups] assign[=] list[[]]
variable[xtype] assign[=] constant[None]
variable[ytype] assign[=] constant[None]
variable[xlabels] assign[=] call[name[set], parameter[]]
variable[ylabels] assign[=] call[name[set], parameter[]]
for taget[name[r]] in starred[name[results]] begin[:]
call[name[logging].info, parameter[call[constant[R={}].format, parameter[name[r]]]]]
variable[xtype] assign[=] call[name[r]][constant[field]]
variable[rv] assign[=] call[name[r]][constant[value]]
call[name[xlabels].add, parameter[name[rv]]]
for taget[name[piv]] in starred[call[name[r]][constant[pivot]]] begin[:]
variable[ytype] assign[=] call[name[piv]][constant[field]]
variable[pv] assign[=] call[name[piv]][constant[value]]
call[name[ylabels].add, parameter[name[pv]]]
call[name[tups].append, parameter[tuple[[<ast.Name object at 0x7da1b08bbd60>, <ast.Name object at 0x7da1b08bbd90>, <ast.Subscript object at 0x7da1b08bbdc0>]]]]
variable[z] assign[=] <ast.ListComp object at 0x7da1b08bbee0>
variable[xlabels] assign[=] call[name[list], parameter[name[xlabels]]]
variable[ylabels] assign[=] call[name[list], parameter[name[ylabels]]]
variable[xmap] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b08b9b40>]]
variable[ymap] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b08b9840>]]
for taget[name[t]] in starred[name[tups]] begin[:]
call[call[name[z]][call[name[ymap]][call[name[t]][constant[1]]]]][call[name[xmap]][call[name[t]][constant[0]]]] assign[=] call[name[t]][constant[2]]
variable[m] assign[=] dictionary[[<ast.Constant object at 0x7da1b08b9180>, <ast.Constant object at 0x7da1b08b9150>, <ast.Constant object at 0x7da1b08b9120>, <ast.Constant object at 0x7da1b08b90f0>, <ast.Constant object at 0x7da1b08b90c0>], [<ast.Name object at 0x7da1b08b9060>, <ast.Name object at 0x7da1b08b9030>, <ast.Name object at 0x7da1b08b9000>, <ast.Name object at 0x7da1b08b8fd0>, <ast.Name object at 0x7da1b08b8fa0>]]
return[name[m]]
|
keyword[def] identifier[pivot_query_as_matrix] ( identifier[facet] = keyword[None] , identifier[facet_pivot_fields] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[facet_pivot_fields] keyword[is] keyword[None] :
identifier[facet_pivot_fields] =[]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[kwargs] ))
identifier[fp] = identifier[search_associations] ( identifier[rows] = literal[int] ,
identifier[facet_fields] =[ identifier[facet] ],
identifier[facet_pivot_fields] = identifier[facet_pivot_fields] ,
** identifier[kwargs] )[ literal[string] ]
identifier[results] = identifier[list] ( identifier[fp] . identifier[items] ())[ literal[int] ][ literal[int] ]
identifier[tups] =[]
identifier[xtype] = keyword[None]
identifier[ytype] = keyword[None]
identifier[xlabels] = identifier[set] ()
identifier[ylabels] = identifier[set] ()
keyword[for] identifier[r] keyword[in] identifier[results] :
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[r] ))
identifier[xtype] = identifier[r] [ literal[string] ]
identifier[rv] = identifier[r] [ literal[string] ]
identifier[xlabels] . identifier[add] ( identifier[rv] )
keyword[for] identifier[piv] keyword[in] identifier[r] [ literal[string] ]:
identifier[ytype] = identifier[piv] [ literal[string] ]
identifier[pv] = identifier[piv] [ literal[string] ]
identifier[ylabels] . identifier[add] ( identifier[pv] )
identifier[tups] . identifier[append] (( identifier[rv] , identifier[pv] , identifier[piv] [ literal[string] ]))
identifier[z] =[[ literal[int] ]* identifier[len] ( identifier[xlabels] ) keyword[for] identifier[i1] keyword[in] identifier[range] ( identifier[len] ( identifier[ylabels] ))]
identifier[xlabels] = identifier[list] ( identifier[xlabels] )
identifier[ylabels] = identifier[list] ( identifier[ylabels] )
identifier[xmap] = identifier[dict] ([ identifier[x] [::- literal[int] ] keyword[for] identifier[x] keyword[in] identifier[enumerate] ( identifier[xlabels] )])
identifier[ymap] = identifier[dict] ([ identifier[x] [::- literal[int] ] keyword[for] identifier[x] keyword[in] identifier[enumerate] ( identifier[ylabels] )])
keyword[for] identifier[t] keyword[in] identifier[tups] :
identifier[z] [ identifier[ymap] [ identifier[t] [ literal[int] ]]][ identifier[xmap] [ identifier[t] [ literal[int] ]]]= identifier[t] [ literal[int] ]
identifier[m] ={ literal[string] : identifier[xtype] ,
literal[string] : identifier[ytype] ,
literal[string] : identifier[xlabels] ,
literal[string] : identifier[ylabels] ,
literal[string] : identifier[z] }
keyword[return] identifier[m]
|
def pivot_query_as_matrix(facet=None, facet_pivot_fields=None, **kwargs):
"""
    Perform a facet pivot query and return the counts as a matrix with axis labels.
"""
if facet_pivot_fields is None:
facet_pivot_fields = [] # depends on [control=['if'], data=['facet_pivot_fields']]
logging.info('Additional args: {}'.format(kwargs))
fp = search_associations(rows=0, facet_fields=[facet], facet_pivot_fields=facet_pivot_fields, **kwargs)['facet_pivot']
# we assume only one
results = list(fp.items())[0][1]
tups = []
xtype = None
ytype = None
xlabels = set()
ylabels = set()
for r in results:
logging.info('R={}'.format(r))
xtype = r['field']
rv = r['value']
xlabels.add(rv)
for piv in r['pivot']:
ytype = piv['field']
pv = piv['value']
ylabels.add(pv)
tups.append((rv, pv, piv['count'])) # depends on [control=['for'], data=['piv']] # depends on [control=['for'], data=['r']]
z = [[0] * len(xlabels) for i1 in range(len(ylabels))]
xlabels = list(xlabels)
ylabels = list(ylabels)
xmap = dict([x[::-1] for x in enumerate(xlabels)])
ymap = dict([x[::-1] for x in enumerate(ylabels)])
for t in tups:
z[ymap[t[1]]][xmap[t[0]]] = t[2] # depends on [control=['for'], data=['t']]
m = {'xtype': xtype, 'ytype': ytype, 'xaxis': xlabels, 'yaxis': ylabels, 'z': z}
return m
|
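The returned dict is a dense count matrix: z[i][j] holds the count for (yaxis[i], xaxis[j]). The core tuple-to-matrix step, reproduced on hand-made data (no Solr backend needed; the labels are illustrative):

tups = [('kidney', 'mouse', 5), ('kidney', 'human', 2), ('liver', 'human', 7)]
xlabels = ['kidney', 'liver']
ylabels = ['mouse', 'human']
xmap = {v: i for i, v in enumerate(xlabels)}
ymap = {v: i for i, v in enumerate(ylabels)}
z = [[0] * len(xlabels) for _ in range(len(ylabels))]
for x, y, n in tups:
    z[ymap[y]][xmap[x]] = n
print(z)   # [[5, 0], [2, 7]] -- rows follow ylabels, columns follow xlabels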
def item_deelgemeente_adapter(obj, request):
"""
    Adapter for rendering an object of
:class:`crabpy.gateway.crab.Deelgemeente` to json.
"""
return {
'id': obj.id,
'naam': obj.naam,
'gemeente': {
'id': obj.gemeente.id,
'naam': obj.gemeente.naam
}
}
|
def function[item_deelgemeente_adapter, parameter[obj, request]]:
constant[
    Adapter for rendering an object of
:class:`crabpy.gateway.crab.Deelgemeente` to json.
]
return[dictionary[[<ast.Constant object at 0x7da1b0a9cd90>, <ast.Constant object at 0x7da1b0a9e5c0>, <ast.Constant object at 0x7da1b0a9e710>], [<ast.Attribute object at 0x7da1b0a9fd00>, <ast.Attribute object at 0x7da1b0a9d7b0>, <ast.Dict object at 0x7da1b0a9e6b0>]]]
|
keyword[def] identifier[item_deelgemeente_adapter] ( identifier[obj] , identifier[request] ):
literal[string]
keyword[return] {
literal[string] : identifier[obj] . identifier[id] ,
literal[string] : identifier[obj] . identifier[naam] ,
literal[string] :{
literal[string] : identifier[obj] . identifier[gemeente] . identifier[id] ,
literal[string] : identifier[obj] . identifier[gemeente] . identifier[naam]
}
}
|
def item_deelgemeente_adapter(obj, request):
"""
    Adapter for rendering an object of
:class:`crabpy.gateway.crab.Deelgemeente` to json.
"""
return {'id': obj.id, 'naam': obj.naam, 'gemeente': {'id': obj.gemeente.id, 'naam': obj.gemeente.naam}}
|
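Any object carrying id, naam and a nested gemeente works; a sketch with SimpleNamespace stand-ins for the crabpy gateway objects:

from types import SimpleNamespace

gemeente = SimpleNamespace(id=44021, naam='Gent')
deelgemeente = SimpleNamespace(id='44021A', naam='Gent', gemeente=gemeente)
print(item_deelgemeente_adapter(deelgemeente, request=None))
# {'id': '44021A', 'naam': 'Gent', 'gemeente': {'id': 44021, 'naam': 'Gent'}}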
def to_file(self, f):
"""Write vocab to a file.
:param (file) f: a file object, e.g. as returned by calling `open`
File format:
word0<TAB>count0
word1<TAB>count1
...
word with index 0 is on the 0th line and so on...
"""
for word in self._index2word:
count = self._counts[word]
f.write(u'{}\t{}\n'.format(word, count).encode('utf-8'))
|
def function[to_file, parameter[self, f]]:
constant[Write vocab to a file.
:param (file) f: a file object, e.g. as returned by calling `open`
File format:
word0<TAB>count0
word1<TAB>count1
...
word with index 0 is on the 0th line and so on...
]
for taget[name[word]] in starred[name[self]._index2word] begin[:]
variable[count] assign[=] call[name[self]._counts][name[word]]
call[name[f].write, parameter[call[call[constant[{} {}
].format, parameter[name[word], name[count]]].encode, parameter[constant[utf-8]]]]]
|
keyword[def] identifier[to_file] ( identifier[self] , identifier[f] ):
literal[string]
keyword[for] identifier[word] keyword[in] identifier[self] . identifier[_index2word] :
identifier[count] = identifier[self] . identifier[_counts] [ identifier[word] ]
identifier[f] . identifier[write] ( literal[string] . identifier[format] ( identifier[word] , identifier[count] ). identifier[encode] ( literal[string] ))
|
def to_file(self, f):
"""Write vocab to a file.
:param (file) f: a file object, e.g. as returned by calling `open`
File format:
word0<TAB>count0
word1<TAB>count1
...
word with index 0 is on the 0th line and so on...
"""
for word in self._index2word:
count = self._counts[word]
f.write(u'{}\t{}\n'.format(word, count).encode('utf-8')) # depends on [control=['for'], data=['word']]
|
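Since each line is encoded to UTF-8 bytes before writing, the target file should be opened in binary mode; vocab below is assumed to be an instance of the owning vocabulary class:

with open('vocab.tsv', 'wb') as f:
    vocab.to_file(f)
# vocab.tsv now holds one word<TAB>count line per word, in index order.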
def rdrecord(record_name, sampfrom=0, sampto=None, channels=None,
physical=True, pb_dir=None, m2s=True, smooth_frames=True,
ignore_skew=False, return_res=64, force_channels=True,
channel_names=None, warn_empty=False):
"""
Read a WFDB record and return the signal and record descriptors as
attributes in a Record or MultiRecord object.
Parameters
----------
record_name : str
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pb_dir`
parameter is set, this parameter should contain just the base
        record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
sampfrom : int, optional
The starting sample number to read for all channels.
sampto : int, or 'end', optional
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list, optional
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool, optional
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
pb_dir : str, optional
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
pb_dir='mitdb'.
m2s : bool, optional
Used when reading multi-segment records. Specifies whether to
directly return a wfdb MultiRecord object (False), or to convert
it into and return a wfdb Record object (True).
smooth_frames : bool, optional
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
ignore_skew : bool, optional
Used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the
output variable (False), or to ignore the skew field and load in
all values contained in the dat files unaligned (True).
return_res : int, optional
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
force_channels : bool, optional
Used when reading multi-segment variable layout records. Whether
to update the layout specification record, and the converted
Record object if `m2s` is True, to match the input `channels`
argument, or to omit channels in which no read segment contains
the signals.
channel_names : list, optional
List of channel names to return. If this parameter is specified,
it takes precedence over `channels`.
warn_empty : bool, optional
Whether to display a warning if the specified channel indices
or names are not contained in the record, and no signal is
returned.
Returns
-------
record : Record or MultiRecord
The wfdb Record or MultiRecord object representing the contents
of the record read.
Notes
-----
If a signal range or channel selection is specified when calling
this function, the resulting attributes of the returned object will
be set to reflect the section of the record that is actually read,
rather than necessarily the entire record. For example, if
`channels=[0, 1, 2]` is specified when reading a 12 channel record,
the 'n_sig' attribute will be 3, not 12.
The `rdsamp` function exists as a simple alternative to `rdrecord`
for the common purpose of extracting the physical signals and a few
important descriptor fields.
Examples
--------
>>> record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800,
channels=[1, 3])
"""
dir_name, base_record_name = os.path.split(record_name)
dir_name = os.path.abspath(dir_name)
# Read the header fields
record = rdheader(record_name, pb_dir=pb_dir, rd_segments=False)
# Set defaults for sampto and channels input variables
if sampto is None:
# If the header does not contain the signal length, figure it
# out from the first dat file. This is only possible for single
# segment records. If there are no signals, sig_len is 0.
if record.sig_len is None:
if record.n_sig == 0:
record.sig_len = 0
else:
record.sig_len = _signal._infer_sig_len(
file_name=record.file_name[0], fmt=record.fmt[0],
n_sig=record.file_name.count(record.file_name[0]),
dir_name=dir_name, pb_dir=pb_dir)
sampto = record.sig_len
# channel_names takes precedence over channels
if channel_names is not None:
# Figure out the channel indices matching the record, if any.
if isinstance(record, Record):
reference_record = record
else:
if record.layout == 'fixed':
# Find the first non-empty segment to get the signal
# names
first_seg_name = [n for n in record.seg_name if n != '~'][0]
                reference_record = rdheader(os.path.join(dir_name,
                                                         first_seg_name),
                                            pb_dir=pb_dir)
else:
# Use the layout specification header to get the signal
# names
reference_record = rdheader(os.path.join(dir_name,
record.seg_name[0]),
pb_dir=pb_dir)
channels = _get_wanted_channels(wanted_sig_names=channel_names,
record_sig_names=reference_record.sig_name)
elif channels is None:
channels = list(range(record.n_sig))
# Ensure that input fields are valid for the record
record.check_read_inputs(sampfrom, sampto, channels, physical,
smooth_frames, return_res)
# If the signal doesn't have the specified channels, there will be
# no signal. Recall that `rdsamp` is not called on segments of multi
# segment records if the channels are not present, so this won't
# break anything.
if not len(channels):
old_record = record
record = Record()
for attr in _header.RECORD_SPECS.index:
if attr == 'n_seg':
continue
elif attr in ['n_sig', 'sig_len']:
setattr(record, attr, 0)
else:
setattr(record, attr, getattr(old_record, attr))
if warn_empty:
print('None of the specified signals were contained in the record')
# A single segment record
elif isinstance(record, Record):
# Only 1 sample/frame, or frames are smoothed. Return uniform numpy array
if smooth_frames or max([record.samps_per_frame[c] for c in channels]) == 1:
# Read signals from the associated dat files that contain
# wanted channels
record.d_signal = _signal._rd_segment(record.file_name, dir_name,
pb_dir, record.fmt,
record.n_sig, record.sig_len,
record.byte_offset,
record.samps_per_frame,
record.skew, sampfrom, sampto,
channels, smooth_frames,
ignore_skew)
# Arrange/edit the object fields to reflect user channel
# and/or signal range input
record._arrange_fields(channels=channels, sampfrom=sampfrom,
expanded=False)
if physical:
# Perform inplace dac to get physical signal
record.dac(expanded=False, return_res=return_res, inplace=True)
# Return each sample of the signals with multiple samples per frame
else:
record.e_d_signal = _signal._rd_segment(record.file_name, dir_name,
pb_dir, record.fmt,
record.n_sig,
record.sig_len,
record.byte_offset,
record.samps_per_frame,
record.skew, sampfrom,
sampto, channels,
smooth_frames, ignore_skew)
# Arrange/edit the object fields to reflect user channel
# and/or signal range input
record._arrange_fields(channels=channels, sampfrom=sampfrom,
expanded=True)
if physical:
# Perform dac to get physical signal
record.dac(expanded=True, return_res=return_res, inplace=True)
# A multi segment record
else:
# Strategy:
# 1. Read the required segments and store them in
# Record objects.
# 2. Update the parameters of the objects to reflect
# the state of the sections read.
# 3. Update the parameters of the overall MultiRecord
# object to reflect the state of the individual segments.
# 4. If specified, convert the MultiRecord object
# into a single Record object.
# Segments field is a list of Record objects
# Empty segments store None.
record.segments = [None] * record.n_seg
# Variable layout, read the layout specification header
if record.layout == 'variable':
record.segments[0] = rdheader(os.path.join(dir_name,
record.seg_name[0]),
pb_dir=pb_dir)
# The segment numbers and samples within each segment to read.
seg_numbers, seg_ranges = record._required_segments(sampfrom, sampto)
# The channels within each segment to read
seg_channels = record._required_channels(seg_numbers, channels,
dir_name, pb_dir)
# Read the desired samples in the relevant segments
for i in range(len(seg_numbers)):
seg_num = seg_numbers[i]
# Empty segment or segment with no relevant channels
if record.seg_name[seg_num] == '~' or len(seg_channels[i]) == 0:
record.segments[seg_num] = None
else:
record.segments[seg_num] = rdrecord(
os.path.join(dir_name, record.seg_name[seg_num]),
sampfrom=seg_ranges[i][0], sampto=seg_ranges[i][1],
channels=seg_channels[i], physical=physical, pb_dir=pb_dir)
# Arrange the fields of the layout specification segment, and
# the overall object, to reflect user input.
record._arrange_fields(seg_numbers=seg_numbers, seg_ranges=seg_ranges,
channels=channels, sampfrom=sampfrom,
force_channels=force_channels)
# Convert object into a single segment Record object
if m2s:
record = record.multi_to_single(physical=physical,
return_res=return_res)
# Perform dtype conversion if necessary
if isinstance(record, Record) and record.n_sig > 0:
record.convert_dtype(physical, return_res, smooth_frames)
return record
|
def function[rdrecord, parameter[record_name, sampfrom, sampto, channels, physical, pb_dir, m2s, smooth_frames, ignore_skew, return_res, force_channels, channel_names, warn_empty]]:
constant[
Read a WFDB record and return the signal and record descriptors as
attributes in a Record or MultiRecord object.
Parameters
----------
record_name : str
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pb_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
sampfrom : int, optional
The starting sample number to read for all channels.
sampto : int, or 'end', optional
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list, optional
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool, optional
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
pb_dir : str, optional
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
e.g. for record '100' in 'http://physionet.org/physiobank/database/mitdb', use
pb_dir='mitdb'.
m2s : bool, optional
Used when reading multi-segment records. Specifies whether to
directly return a wfdb MultiRecord object (False), or to convert
it into and return a wfdb Record object (True).
smooth_frames : bool, optional
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
ignore_skew : bool, optional
Used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the
output variable (False), or to ignore the skew field and load in
all values contained in the dat files unaligned (True).
return_res : int, optional
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
force_channels : bool, optional
Used when reading multi-segment variable layout records. Whether
to update the layout specification record, and the converted
Record object if `m2s` is True, to match the input `channels`
argument, or to omit channels in which no read segment contains
the signals.
channel_names : list, optional
List of channel names to return. If this parameter is specified,
it takes precedence over `channels`.
warn_empty : bool, optional
Whether to display a warning if the specified channel indices
or names are not contained in the record, and no signal is
returned.
Returns
-------
record : Record or MultiRecord
The wfdb Record or MultiRecord object representing the contents
of the record read.
Notes
-----
If a signal range or channel selection is specified when calling
this function, the resulting attributes of the returned object will
be set to reflect the section of the record that is actually read,
rather than necessarily the entire record. For example, if
`channels=[0, 1, 2]` is specified when reading a 12 channel record,
the 'n_sig' attribute will be 3, not 12.
The `rdsamp` function exists as a simple alternative to `rdrecord`
for the common purpose of extracting the physical signals and a few
important descriptor fields.
Examples
--------
>>> record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800,
channels=[1, 3])
]
<ast.Tuple object at 0x7da1b19baf80> assign[=] call[name[os].path.split, parameter[name[record_name]]]
variable[dir_name] assign[=] call[name[os].path.abspath, parameter[name[dir_name]]]
variable[record] assign[=] call[name[rdheader], parameter[name[record_name]]]
if compare[name[sampto] is constant[None]] begin[:]
if compare[name[record].sig_len is constant[None]] begin[:]
if compare[name[record].n_sig equal[==] constant[0]] begin[:]
name[record].sig_len assign[=] constant[0]
variable[sampto] assign[=] name[record].sig_len
if compare[name[channel_names] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[record], name[Record]]] begin[:]
variable[reference_record] assign[=] name[record]
variable[channels] assign[=] call[name[_get_wanted_channels], parameter[]]
call[name[record].check_read_inputs, parameter[name[sampfrom], name[sampto], name[channels], name[physical], name[smooth_frames], name[return_res]]]
if <ast.UnaryOp object at 0x7da1b19bbe50> begin[:]
variable[old_record] assign[=] name[record]
variable[record] assign[=] call[name[Record], parameter[]]
for taget[name[attr]] in starred[name[_header].RECORD_SPECS.index] begin[:]
if compare[name[attr] equal[==] constant[n_seg]] begin[:]
continue
if name[warn_empty] begin[:]
call[name[print], parameter[constant[None of the specified signals were contained in the record]]]
if <ast.BoolOp object at 0x7da1b19ec250> begin[:]
call[name[record].convert_dtype, parameter[name[physical], name[return_res], name[smooth_frames]]]
return[name[record]]
|
keyword[def] identifier[rdrecord] ( identifier[record_name] , identifier[sampfrom] = literal[int] , identifier[sampto] = keyword[None] , identifier[channels] = keyword[None] ,
identifier[physical] = keyword[True] , identifier[pb_dir] = keyword[None] , identifier[m2s] = keyword[True] , identifier[smooth_frames] = keyword[True] ,
identifier[ignore_skew] = keyword[False] , identifier[return_res] = literal[int] , identifier[force_channels] = keyword[True] ,
identifier[channel_names] = keyword[None] , identifier[warn_empty] = keyword[False] ):
literal[string]
identifier[dir_name] , identifier[base_record_name] = identifier[os] . identifier[path] . identifier[split] ( identifier[record_name] )
identifier[dir_name] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[dir_name] )
identifier[record] = identifier[rdheader] ( identifier[record_name] , identifier[pb_dir] = identifier[pb_dir] , identifier[rd_segments] = keyword[False] )
keyword[if] identifier[sampto] keyword[is] keyword[None] :
keyword[if] identifier[record] . identifier[sig_len] keyword[is] keyword[None] :
keyword[if] identifier[record] . identifier[n_sig] == literal[int] :
identifier[record] . identifier[sig_len] = literal[int]
keyword[else] :
identifier[record] . identifier[sig_len] = identifier[_signal] . identifier[_infer_sig_len] (
identifier[file_name] = identifier[record] . identifier[file_name] [ literal[int] ], identifier[fmt] = identifier[record] . identifier[fmt] [ literal[int] ],
identifier[n_sig] = identifier[record] . identifier[file_name] . identifier[count] ( identifier[record] . identifier[file_name] [ literal[int] ]),
identifier[dir_name] = identifier[dir_name] , identifier[pb_dir] = identifier[pb_dir] )
identifier[sampto] = identifier[record] . identifier[sig_len]
keyword[if] identifier[channel_names] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[record] , identifier[Record] ):
identifier[reference_record] = identifier[record]
keyword[else] :
keyword[if] identifier[record] . identifier[layout] == literal[string] :
identifier[first_seg_name] =[ identifier[n] keyword[for] identifier[n] keyword[in] identifier[record] . identifier[seg_name] keyword[if] identifier[n] != literal[string] ][ literal[int] ]
identifier[reference_record] = identifier[rdheader] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir_name] ,
identifier[first_seg_name] ),
identifier[pb_dir] = identifier[pb_dir] )
keyword[else] :
identifier[reference_record] = identifier[rdheader] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir_name] ,
identifier[record] . identifier[seg_name] [ literal[int] ]),
identifier[pb_dir] = identifier[pb_dir] )
identifier[channels] = identifier[_get_wanted_channels] ( identifier[wanted_sig_names] = identifier[channel_names] ,
identifier[record_sig_names] = identifier[reference_record] . identifier[sig_name] )
keyword[elif] identifier[channels] keyword[is] keyword[None] :
identifier[channels] = identifier[list] ( identifier[range] ( identifier[record] . identifier[n_sig] ))
identifier[record] . identifier[check_read_inputs] ( identifier[sampfrom] , identifier[sampto] , identifier[channels] , identifier[physical] ,
identifier[smooth_frames] , identifier[return_res] )
keyword[if] keyword[not] identifier[len] ( identifier[channels] ):
identifier[old_record] = identifier[record]
identifier[record] = identifier[Record] ()
keyword[for] identifier[attr] keyword[in] identifier[_header] . identifier[RECORD_SPECS] . identifier[index] :
keyword[if] identifier[attr] == literal[string] :
keyword[continue]
keyword[elif] identifier[attr] keyword[in] [ literal[string] , literal[string] ]:
identifier[setattr] ( identifier[record] , identifier[attr] , literal[int] )
keyword[else] :
identifier[setattr] ( identifier[record] , identifier[attr] , identifier[getattr] ( identifier[old_record] , identifier[attr] ))
keyword[if] identifier[warn_empty] :
identifier[print] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[record] , identifier[Record] ):
keyword[if] identifier[smooth_frames] keyword[or] identifier[max] ([ identifier[record] . identifier[samps_per_frame] [ identifier[c] ] keyword[for] identifier[c] keyword[in] identifier[channels] ])== literal[int] :
identifier[record] . identifier[d_signal] = identifier[_signal] . identifier[_rd_segment] ( identifier[record] . identifier[file_name] , identifier[dir_name] ,
identifier[pb_dir] , identifier[record] . identifier[fmt] ,
identifier[record] . identifier[n_sig] , identifier[record] . identifier[sig_len] ,
identifier[record] . identifier[byte_offset] ,
identifier[record] . identifier[samps_per_frame] ,
identifier[record] . identifier[skew] , identifier[sampfrom] , identifier[sampto] ,
identifier[channels] , identifier[smooth_frames] ,
identifier[ignore_skew] )
identifier[record] . identifier[_arrange_fields] ( identifier[channels] = identifier[channels] , identifier[sampfrom] = identifier[sampfrom] ,
identifier[expanded] = keyword[False] )
keyword[if] identifier[physical] :
identifier[record] . identifier[dac] ( identifier[expanded] = keyword[False] , identifier[return_res] = identifier[return_res] , identifier[inplace] = keyword[True] )
keyword[else] :
identifier[record] . identifier[e_d_signal] = identifier[_signal] . identifier[_rd_segment] ( identifier[record] . identifier[file_name] , identifier[dir_name] ,
identifier[pb_dir] , identifier[record] . identifier[fmt] ,
identifier[record] . identifier[n_sig] ,
identifier[record] . identifier[sig_len] ,
identifier[record] . identifier[byte_offset] ,
identifier[record] . identifier[samps_per_frame] ,
identifier[record] . identifier[skew] , identifier[sampfrom] ,
identifier[sampto] , identifier[channels] ,
identifier[smooth_frames] , identifier[ignore_skew] )
identifier[record] . identifier[_arrange_fields] ( identifier[channels] = identifier[channels] , identifier[sampfrom] = identifier[sampfrom] ,
identifier[expanded] = keyword[True] )
keyword[if] identifier[physical] :
identifier[record] . identifier[dac] ( identifier[expanded] = keyword[True] , identifier[return_res] = identifier[return_res] , identifier[inplace] = keyword[True] )
keyword[else] :
identifier[record] . identifier[segments] =[ keyword[None] ]* identifier[record] . identifier[n_seg]
keyword[if] identifier[record] . identifier[layout] == literal[string] :
identifier[record] . identifier[segments] [ literal[int] ]= identifier[rdheader] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir_name] ,
identifier[record] . identifier[seg_name] [ literal[int] ]),
identifier[pb_dir] = identifier[pb_dir] )
identifier[seg_numbers] , identifier[seg_ranges] = identifier[record] . identifier[_required_segments] ( identifier[sampfrom] , identifier[sampto] )
identifier[seg_channels] = identifier[record] . identifier[_required_channels] ( identifier[seg_numbers] , identifier[channels] ,
identifier[dir_name] , identifier[pb_dir] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[seg_numbers] )):
identifier[seg_num] = identifier[seg_numbers] [ identifier[i] ]
keyword[if] identifier[record] . identifier[seg_name] [ identifier[seg_num] ]== literal[string] keyword[or] identifier[len] ( identifier[seg_channels] [ identifier[i] ])== literal[int] :
identifier[record] . identifier[segments] [ identifier[seg_num] ]= keyword[None]
keyword[else] :
identifier[record] . identifier[segments] [ identifier[seg_num] ]= identifier[rdrecord] (
identifier[os] . identifier[path] . identifier[join] ( identifier[dir_name] , identifier[record] . identifier[seg_name] [ identifier[seg_num] ]),
identifier[sampfrom] = identifier[seg_ranges] [ identifier[i] ][ literal[int] ], identifier[sampto] = identifier[seg_ranges] [ identifier[i] ][ literal[int] ],
identifier[channels] = identifier[seg_channels] [ identifier[i] ], identifier[physical] = identifier[physical] , identifier[pb_dir] = identifier[pb_dir] )
identifier[record] . identifier[_arrange_fields] ( identifier[seg_numbers] = identifier[seg_numbers] , identifier[seg_ranges] = identifier[seg_ranges] ,
identifier[channels] = identifier[channels] , identifier[sampfrom] = identifier[sampfrom] ,
identifier[force_channels] = identifier[force_channels] )
keyword[if] identifier[m2s] :
identifier[record] = identifier[record] . identifier[multi_to_single] ( identifier[physical] = identifier[physical] ,
identifier[return_res] = identifier[return_res] )
keyword[if] identifier[isinstance] ( identifier[record] , identifier[Record] ) keyword[and] identifier[record] . identifier[n_sig] > literal[int] :
identifier[record] . identifier[convert_dtype] ( identifier[physical] , identifier[return_res] , identifier[smooth_frames] )
keyword[return] identifier[record]
|
def rdrecord(record_name, sampfrom=0, sampto=None, channels=None, physical=True, pb_dir=None, m2s=True, smooth_frames=True, ignore_skew=False, return_res=64, force_channels=True, channel_names=None, warn_empty=False):
"""
Read a WFDB record and return the signal and record descriptors as
attributes in a Record or MultiRecord object.
Parameters
----------
record_name : str
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pb_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
sampfrom : int, optional
The starting sample number to read for all channels.
sampto : int, or 'end', optional
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list, optional
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool, optional
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
pb_dir : str, optional
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
e.g. for record '100' in 'http://physionet.org/physiobank/database/mitdb', use
pb_dir='mitdb'.
m2s : bool, optional
Used when reading multi-segment records. Specifies whether to
directly return a wfdb MultiRecord object (False), or to convert
it into and return a wfdb Record object (True).
smooth_frames : bool, optional
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
ignore_skew : bool, optional
Used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the
output variable (False), or to ignore the skew field and load in
all values contained in the dat files unaligned (True).
return_res : int, optional
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
force_channels : bool, optional
Used when reading multi-segment variable layout records. Whether
to update the layout specification record, and the converted
Record object if `m2s` is True, to match the input `channels`
argument, or to omit channels in which no read segment contains
the signals.
channel_names : list, optional
List of channel names to return. If this parameter is specified,
it takes precedence over `channels`.
warn_empty : bool, optional
Whether to display a warning if the specified channel indices
or names are not contained in the record, and no signal is
returned.
Returns
-------
record : Record or MultiRecord
The wfdb Record or MultiRecord object representing the contents
of the record read.
Notes
-----
If a signal range or channel selection is specified when calling
this function, the resulting attributes of the returned object will
be set to reflect the section of the record that is actually read,
rather than necessarily the entire record. For example, if
`channels=[0, 1, 2]` is specified when reading a 12 channel record,
the 'n_sig' attribute will be 3, not 12.
The `rdsamp` function exists as a simple alternative to `rdrecord`
for the common purpose of extracting the physical signals and a few
important descriptor fields.
Examples
--------
>>> record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800,
channels=[1, 3])
"""
(dir_name, base_record_name) = os.path.split(record_name)
dir_name = os.path.abspath(dir_name)
# Read the header fields
record = rdheader(record_name, pb_dir=pb_dir, rd_segments=False)
# Set defaults for sampto and channels input variables
if sampto is None:
# If the header does not contain the signal length, figure it
# out from the first dat file. This is only possible for single
# segment records. If there are no signals, sig_len is 0.
if record.sig_len is None:
if record.n_sig == 0:
record.sig_len = 0 # depends on [control=['if'], data=[]]
else:
record.sig_len = _signal._infer_sig_len(file_name=record.file_name[0], fmt=record.fmt[0], n_sig=record.file_name.count(record.file_name[0]), dir_name=dir_name, pb_dir=pb_dir) # depends on [control=['if'], data=[]]
sampto = record.sig_len # depends on [control=['if'], data=['sampto']]
# channel_names takes precedence over channels
if channel_names is not None:
# Figure out the channel indices matching the record, if any.
if isinstance(record, Record):
reference_record = record # depends on [control=['if'], data=[]]
elif record.layout == 'fixed':
# Find the first non-empty segment to get the signal
# names
first_seg_name = [n for n in record.seg_name if n != '~'][0]
reference_record = rdheader(os.path.join(dir_name, first_seg_name), pb_dir=pb_dir) # depends on [control=['if'], data=[]]
else:
# Use the layout specification header to get the signal
# names
reference_record = rdheader(os.path.join(dir_name, record.seg_name[0]), pb_dir=pb_dir)
channels = _get_wanted_channels(wanted_sig_names=channel_names, record_sig_names=reference_record.sig_name) # depends on [control=['if'], data=['channel_names']]
elif channels is None:
channels = list(range(record.n_sig)) # depends on [control=['if'], data=['channels']]
# Ensure that input fields are valid for the record
record.check_read_inputs(sampfrom, sampto, channels, physical, smooth_frames, return_res)
# If the signal doesn't have the specified channels, there will be
# no signal. Recall that `rdsamp` is not called on segments of multi
# segment records if the channels are not present, so this won't
# break anything.
if not len(channels):
old_record = record
record = Record()
for attr in _header.RECORD_SPECS.index:
if attr == 'n_seg':
continue # depends on [control=['if'], data=[]]
elif attr in ['n_sig', 'sig_len']:
setattr(record, attr, 0) # depends on [control=['if'], data=['attr']]
else:
setattr(record, attr, getattr(old_record, attr)) # depends on [control=['for'], data=['attr']]
if warn_empty:
print('None of the specified signals were contained in the record') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# A single segment record
elif isinstance(record, Record):
# Only 1 sample/frame, or frames are smoothed. Return uniform numpy array
if smooth_frames or max([record.samps_per_frame[c] for c in channels]) == 1:
# Read signals from the associated dat files that contain
# wanted channels
record.d_signal = _signal._rd_segment(record.file_name, dir_name, pb_dir, record.fmt, record.n_sig, record.sig_len, record.byte_offset, record.samps_per_frame, record.skew, sampfrom, sampto, channels, smooth_frames, ignore_skew)
# Arrange/edit the object fields to reflect user channel
# and/or signal range input
record._arrange_fields(channels=channels, sampfrom=sampfrom, expanded=False)
if physical:
# Perform inplace dac to get physical signal
record.dac(expanded=False, return_res=return_res, inplace=True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Return each sample of the signals with multiple samples per frame
record.e_d_signal = _signal._rd_segment(record.file_name, dir_name, pb_dir, record.fmt, record.n_sig, record.sig_len, record.byte_offset, record.samps_per_frame, record.skew, sampfrom, sampto, channels, smooth_frames, ignore_skew)
# Arrange/edit the object fields to reflect user channel
# and/or signal range input
record._arrange_fields(channels=channels, sampfrom=sampfrom, expanded=True)
if physical:
# Perform dac to get physical signal
record.dac(expanded=True, return_res=return_res, inplace=True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# A multi segment record
# Strategy:
# 1. Read the required segments and store them in
# Record objects.
# 2. Update the parameters of the objects to reflect
# the state of the sections read.
# 3. Update the parameters of the overall MultiRecord
# object to reflect the state of the individual segments.
# 4. If specified, convert the MultiRecord object
# into a single Record object.
# Segments field is a list of Record objects
# Empty segments store None.
record.segments = [None] * record.n_seg
# Variable layout, read the layout specification header
if record.layout == 'variable':
record.segments[0] = rdheader(os.path.join(dir_name, record.seg_name[0]), pb_dir=pb_dir) # depends on [control=['if'], data=[]]
# The segment numbers and samples within each segment to read.
(seg_numbers, seg_ranges) = record._required_segments(sampfrom, sampto)
# The channels within each segment to read
seg_channels = record._required_channels(seg_numbers, channels, dir_name, pb_dir)
# Read the desired samples in the relevant segments
for i in range(len(seg_numbers)):
seg_num = seg_numbers[i]
# Empty segment or segment with no relevant channels
if record.seg_name[seg_num] == '~' or len(seg_channels[i]) == 0:
record.segments[seg_num] = None # depends on [control=['if'], data=[]]
else:
record.segments[seg_num] = rdrecord(os.path.join(dir_name, record.seg_name[seg_num]), sampfrom=seg_ranges[i][0], sampto=seg_ranges[i][1], channels=seg_channels[i], physical=physical, pb_dir=pb_dir) # depends on [control=['for'], data=['i']]
# Arrange the fields of the layout specification segment, and
# the overall object, to reflect user input.
record._arrange_fields(seg_numbers=seg_numbers, seg_ranges=seg_ranges, channels=channels, sampfrom=sampfrom, force_channels=force_channels)
# Convert object into a single segment Record object
if m2s:
record = record.multi_to_single(physical=physical, return_res=return_res) # depends on [control=['if'], data=[]]
# Perform dtype conversion if necessary
if isinstance(record, Record) and record.n_sig > 0:
record.convert_dtype(physical, return_res, smooth_frames) # depends on [control=['if'], data=[]]
return record
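A brief usage sketch for `rdrecord`. The record path and the signal name below are illustrative assumptions; any local or PhysioBank WFDB record behaves the same way.

import wfdb

# Read the first 1000 samples of channels 0 and 1 in physical units
# ('sample-data/100' is a placeholder record path).
record = wfdb.rdrecord('sample-data/100', sampfrom=0, sampto=1000,
                       channels=[0, 1], physical=True)
print(record.n_sig)           # 2, reflecting the channel selection
print(record.p_signal.shape)  # (1000, 2)

# Selecting channels by name takes precedence over `channels`
# ('MLII' is a hypothetical signal name).
rec2 = wfdb.rdrecord('sample-data/100', channel_names=['MLII'])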
|
def select_best_paths(examples):
"""
Process `examples`, select only the paths that work for every example. Select
the best paths with the highest priority.
Args:
examples (dict): Output from :func:`.read_config`.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
"""
possible_paths = {} # {varname: [paths]}
# collect list of all possible paths to all existing variables
for example in examples:
dom = _create_dom(example["html"])
matching_elements = _match_elements(dom, example["vars"])
for key, match in matching_elements.items():
if key not in possible_paths: # TODO: merge paths together?
possible_paths[key] = _collect_paths(match)
# keep only the paths that work in every example where they are required
for example in examples:
dom = _create_dom(example["html"])
matching_elements = _match_elements(dom, example["vars"])
for key, paths in possible_paths.items():
if key not in matching_elements:
continue
# force evaluation now: filter() is lazy in Python 3, and a deferred
# filter would close over the last example's `dom` instead of this one's
possible_paths[key] = list(filter(
lambda path: _is_working_path(
dom,
path,
matching_elements[key]
),
paths
))
priorities = [
"find",
"left_neighbour_tag",
"right_neighbour_tag",
"wfind",
"match",
"Chained"
]
priorities = dict(map(lambda x: (x[1], x[0]), enumerate(priorities)))
# sort all paths by priority table
for key in possible_paths.keys():
possible_paths[key] = list(sorted(
possible_paths[key],
key=lambda x: priorities.get(x.call_type, 100)
))
return possible_paths
|
def function[select_best_paths, parameter[examples]]:
constant[
Process `examples`, select only the paths that work for every example. Select
the best paths with the highest priority.
Args:
examples (dict): Output from :func:`.read_config`.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
]
variable[possible_paths] assign[=] dictionary[[], []]
for taget[name[example]] in starred[name[examples]] begin[:]
variable[dom] assign[=] call[name[_create_dom], parameter[call[name[example]][constant[html]]]]
variable[matching_elements] assign[=] call[name[_match_elements], parameter[name[dom], call[name[example]][constant[vars]]]]
for taget[tuple[[<ast.Name object at 0x7da1b2346800>, <ast.Name object at 0x7da1b2347e20>]]] in starred[call[name[matching_elements].items, parameter[]]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[possible_paths]] begin[:]
call[name[possible_paths]][name[key]] assign[=] call[name[_collect_paths], parameter[name[match]]]
for taget[name[example]] in starred[name[examples]] begin[:]
variable[dom] assign[=] call[name[_create_dom], parameter[call[name[example]][constant[html]]]]
variable[matching_elements] assign[=] call[name[_match_elements], parameter[name[dom], call[name[example]][constant[vars]]]]
for taget[tuple[[<ast.Name object at 0x7da1b2345900>, <ast.Name object at 0x7da1b26adf00>]]] in starred[call[name[possible_paths].items, parameter[]]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[matching_elements]] begin[:]
continue
call[name[possible_paths]][name[key]] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da1b26ae5f0>, name[paths]]]]]
variable[priorities] assign[=] list[[<ast.Constant object at 0x7da1b26af790>, <ast.Constant object at 0x7da1b26ac700>, <ast.Constant object at 0x7da1b26ae560>, <ast.Constant object at 0x7da1b26adbd0>, <ast.Constant object at 0x7da1b26ae290>, <ast.Constant object at 0x7da1b26aed40>]]
variable[priorities] assign[=] call[name[dict], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b26ac340>, call[name[enumerate], parameter[name[priorities]]]]]]]
for taget[name[key]] in starred[call[name[possible_paths].keys, parameter[]]] begin[:]
call[name[possible_paths]][name[key]] assign[=] call[name[list], parameter[call[name[sorted], parameter[call[name[possible_paths]][name[key]]]]]]
return[name[possible_paths]]
|
keyword[def] identifier[select_best_paths] ( identifier[examples] ):
literal[string]
identifier[possible_paths] ={}
keyword[for] identifier[example] keyword[in] identifier[examples] :
identifier[dom] = identifier[_create_dom] ( identifier[example] [ literal[string] ])
identifier[matching_elements] = identifier[_match_elements] ( identifier[dom] , identifier[example] [ literal[string] ])
keyword[for] identifier[key] , identifier[match] keyword[in] identifier[matching_elements] . identifier[items] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[possible_paths] :
identifier[possible_paths] [ identifier[key] ]= identifier[_collect_paths] ( identifier[match] )
keyword[for] identifier[example] keyword[in] identifier[examples] :
identifier[dom] = identifier[_create_dom] ( identifier[example] [ literal[string] ])
identifier[matching_elements] = identifier[_match_elements] ( identifier[dom] , identifier[example] [ literal[string] ])
keyword[for] identifier[key] , identifier[paths] keyword[in] identifier[possible_paths] . identifier[items] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[matching_elements] :
keyword[continue]
identifier[possible_paths] [ identifier[key] ]= identifier[list] ( identifier[filter] (
keyword[lambda] identifier[path] : identifier[_is_working_path] (
identifier[dom] ,
identifier[path] ,
identifier[matching_elements] [ identifier[key] ]
),
identifier[paths]
))
identifier[priorities] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string]
]
identifier[priorities] = identifier[dict] ( identifier[map] ( keyword[lambda] identifier[x] :( identifier[x] [ literal[int] ], identifier[x] [ literal[int] ]), identifier[enumerate] ( identifier[priorities] )))
keyword[for] identifier[key] keyword[in] identifier[possible_paths] . identifier[keys] ():
identifier[possible_paths] [ identifier[key] ]= identifier[list] ( identifier[sorted] (
identifier[possible_paths] [ identifier[key] ],
identifier[key] = keyword[lambda] identifier[x] : identifier[priorities] . identifier[get] ( identifier[x] . identifier[call_type] , literal[int] )
))
keyword[return] identifier[possible_paths]
|
def select_best_paths(examples):
"""
Process `examples`, select only the paths that work for every example. Select
the best paths with the highest priority.
Args:
examples (dict): Output from :func:`.read_config`.
Returns:
list: List of :class:`.PathCall` and :class:`.Chained` objects.
"""
possible_paths = {} # {varname: [paths]}
# collect list of all possible paths to all existing variables
for example in examples:
dom = _create_dom(example['html'])
matching_elements = _match_elements(dom, example['vars'])
for (key, match) in matching_elements.items():
if key not in possible_paths: # TODO: merge paths together?
possible_paths[key] = _collect_paths(match) # depends on [control=['if'], data=['key', 'possible_paths']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['example']]
# keep only the paths that work in every example where they are required
for example in examples:
dom = _create_dom(example['html'])
matching_elements = _match_elements(dom, example['vars'])
for (key, paths) in possible_paths.items():
if key not in matching_elements:
continue # depends on [control=['if'], data=[]]
possible_paths[key] = list(filter(lambda path: _is_working_path(dom, path, matching_elements[key]), paths)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['example']]
priorities = ['find', 'left_neighbour_tag', 'right_neighbour_tag', 'wfind', 'match', 'Chained']
priorities = dict(map(lambda x: (x[1], x[0]), enumerate(priorities)))
# sort all paths by priority table
for key in possible_paths.keys():
possible_paths[key] = list(sorted(possible_paths[key], key=lambda x: priorities.get(x.call_type, 100))) # depends on [control=['for'], data=['key']]
return possible_paths
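A sketch of driving `select_best_paths`, assuming the helper functions from its module are importable. The example dict shape follows the docstring's pointer to `read_config`; the HTML, the variable names, and the exact format of the 'vars' values are assumptions.

# Assumed input shape: each example pairs raw HTML with the variable
# values expected to be found inside it.
examples = [
    {
        "html": "<html><body><h1>News</h1><span>42</span></body></html>",
        "vars": {"title": "News", "count": "42"},
    },
]
best = select_best_paths(examples)
# `best` maps each variable name to a priority-sorted list of
# PathCall/Chained objects; best["title"][0] would be the preferred path.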
|
def get_sources(self, cuts=None, distance=None, skydir=None,
minmax_ts=None, minmax_npred=None, exclude=None,
square=False):
"""Retrieve list of sources in the ROI satisfying the given
selections.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
"""
coordsys = self.config['binning']['coordsys']
return self.roi.get_sources(skydir, distance, cuts,
minmax_ts, minmax_npred,
exclude, square,
coordsys=coordsys)
|
def function[get_sources, parameter[self, cuts, distance, skydir, minmax_ts, minmax_npred, exclude, square]]:
constant[Retrieve list of sources in the ROI satisfying the given
selections.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
]
variable[coordsys] assign[=] call[call[name[self].config][constant[binning]]][constant[coordsys]]
return[call[name[self].roi.get_sources, parameter[name[skydir], name[distance], name[cuts], name[minmax_ts], name[minmax_npred], name[exclude], name[square]]]]
|
keyword[def] identifier[get_sources] ( identifier[self] , identifier[cuts] = keyword[None] , identifier[distance] = keyword[None] , identifier[skydir] = keyword[None] ,
identifier[minmax_ts] = keyword[None] , identifier[minmax_npred] = keyword[None] , identifier[exclude] = keyword[None] ,
identifier[square] = keyword[False] ):
literal[string]
identifier[coordsys] = identifier[self] . identifier[config] [ literal[string] ][ literal[string] ]
keyword[return] identifier[self] . identifier[roi] . identifier[get_sources] ( identifier[skydir] , identifier[distance] , identifier[cuts] ,
identifier[minmax_ts] , identifier[minmax_npred] ,
identifier[exclude] , identifier[square] ,
identifier[coordsys] = identifier[coordsys] )
|
def get_sources(self, cuts=None, distance=None, skydir=None, minmax_ts=None, minmax_npred=None, exclude=None, square=False):
"""Retrieve list of sources in the ROI satisfying the given
selections.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
"""
coordsys = self.config['binning']['coordsys']
return self.roi.get_sources(skydir, distance, cuts, minmax_ts, minmax_npred, exclude, square, coordsys=coordsys)
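A hypothetical call to this wrapper, where `gta` stands in for an analysis object that defines `get_sources` and carries a configured ROI; the cut values are illustrative.

# `gta` is an assumption, not defined in the source above.
srcs = gta.get_sources(distance=3.0,            # within 3 deg of the ROI center
                       minmax_ts=[25.0, None],  # keep sources with TS >= 25
                       square=False)            # circular selection region
for src in srcs:
    print(src.name)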
|
def to_dict(self):
"""Dict representation of the database as credentials plus tables dict representation."""
db_dict = self.credentials
db_dict.update(self.tables.to_dict())
return db_dict
|
def function[to_dict, parameter[self]]:
constant[Dict representation of the database: its credentials merged with the dict representation of its tables.]
variable[db_dict] assign[=] call[name[dict], parameter[name[self].credentials]]
call[name[db_dict].update, parameter[call[name[self].tables.to_dict, parameter[]]]]
return[name[db_dict]]
|
keyword[def] identifier[to_dict] ( identifier[self] ):
literal[string]
identifier[db_dict] = identifier[dict] ( identifier[self] . identifier[credentials] )
identifier[db_dict] . identifier[update] ( identifier[self] . identifier[tables] . identifier[to_dict] ())
keyword[return] identifier[db_dict]
|
def to_dict(self):
"""Dict representation of the database as credentials plus tables dict representation."""
db_dict = dict(self.credentials)  # copy, so update() below does not mutate credentials
db_dict.update(self.tables.to_dict())
return db_dict
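A minimal, self-contained sketch of `to_dict`; `SimpleNamespace` stands in for the real database object, and every attribute value is made up.

from types import SimpleNamespace

# Stand-in object exposing the two attributes to_dict() relies on.
db = SimpleNamespace(
    credentials={'host': 'localhost', 'user': 'admin'},
    tables=SimpleNamespace(to_dict=lambda: {'tables': ['users', 'orders']}),
)
print(to_dict(db))
# {'host': 'localhost', 'user': 'admin', 'tables': ['users', 'orders']}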
|
async def arun_process(path: Union[Path, str], target: Callable, *,
args: Tuple[Any]=(),
kwargs: Dict[str, Any]=None,
callback: Callable[[Set[Tuple[Change, str]]], Awaitable]=None,
watcher_cls: Type[AllWatcher]=PythonWatcher,
debounce=400,
min_sleep=100):
"""
Run a function in a subprocess using multiprocessing.Process, restart it whenever files change in path.
"""
watcher = awatch(path, watcher_cls=watcher_cls, debounce=debounce, min_sleep=min_sleep)
start_process = partial(_start_process, target=target, args=args, kwargs=kwargs)
process = await watcher.run_in_executor(start_process)
reloads = 0
async for changes in watcher:
callback and await callback(changes)
await watcher.run_in_executor(_stop_process, process)
process = await watcher.run_in_executor(start_process)
reloads += 1
return reloads
|
<ast.AsyncFunctionDef object at 0x7da20e963880>
|
keyword[async] keyword[def] identifier[arun_process] ( identifier[path] : identifier[Union] [ identifier[Path] , identifier[str] ], identifier[target] : identifier[Callable] ,*,
identifier[args] : identifier[Tuple] [ identifier[Any] ]=(),
identifier[kwargs] : identifier[Dict] [ identifier[str] , identifier[Any] ]= keyword[None] ,
identifier[callback] : identifier[Callable] [[ identifier[Set] [ identifier[Tuple] [ identifier[Change] , identifier[str] ]]], identifier[Awaitable] ]= keyword[None] ,
identifier[watcher_cls] : identifier[Type] [ identifier[AllWatcher] ]= identifier[PythonWatcher] ,
identifier[debounce] = literal[int] ,
identifier[min_sleep] = literal[int] ):
literal[string]
identifier[watcher] = identifier[awatch] ( identifier[path] , identifier[watcher_cls] = identifier[watcher_cls] , identifier[debounce] = identifier[debounce] , identifier[min_sleep] = identifier[min_sleep] )
identifier[start_process] = identifier[partial] ( identifier[_start_process] , identifier[target] = identifier[target] , identifier[args] = identifier[args] , identifier[kwargs] = identifier[kwargs] )
identifier[process] = keyword[await] identifier[watcher] . identifier[run_in_executor] ( identifier[start_process] )
identifier[reloads] = literal[int]
keyword[async] keyword[for] identifier[changes] keyword[in] identifier[watcher] :
identifier[callback] keyword[and] keyword[await] identifier[callback] ( identifier[changes] )
keyword[await] identifier[watcher] . identifier[run_in_executor] ( identifier[_stop_process] , identifier[process] )
identifier[process] = keyword[await] identifier[watcher] . identifier[run_in_executor] ( identifier[start_process] )
identifier[reloads] += literal[int]
keyword[return] identifier[reloads]
|
async def arun_process(path: Union[Path, str], target: Callable, *, args: Tuple[Any]=(), kwargs: Dict[str, Any]=None, callback: Callable[[Set[Tuple[Change, str]]], Awaitable]=None, watcher_cls: Type[AllWatcher]=PythonWatcher, debounce=400, min_sleep=100):
"""
Run a function in a subprocess using multiprocessing.Process, restart it whenever files change in path.
"""
watcher = awatch(path, watcher_cls=watcher_cls, debounce=debounce, min_sleep=min_sleep)
start_process = partial(_start_process, target=target, args=args, kwargs=kwargs)
process = await watcher.run_in_executor(start_process)
reloads = 0
async for changes in watcher:
callback and await callback(changes)
await watcher.run_in_executor(_stop_process, process)
process = await watcher.run_in_executor(start_process)
reloads += 1
return reloads
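A sketch of scheduling `arun_process`; the watched path and the `worker` function are assumptions. The target runs via multiprocessing, so it must be a picklable module-level callable.

import asyncio

def worker(name):
    # Runs in a fresh subprocess after every detected change.
    print('worker', name, '(re)started')

async def main():
    # Restart worker() whenever Python files under './app' change
    # ('./app' is a placeholder path); returns once the watcher stops.
    reloads = await arun_process('./app', worker, args=('demo',))
    print('stopped after', reloads, 'reloads')

asyncio.run(main())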
|
def update(self, *args, **kwargs):
"""d.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
Overwrite the values in `d` with the key-value pairs from `E` and `F`.
If any key in `E` or `F` is invalid in `d`, ``KeyError`` is raised.
This method is atomic - either all given values are set in `d`, or
none are. ``update`` does not commit a partially-updated version to
disk.
For kwargs, Seismic Unix-style names are supported. `BinField` and
`TraceField` are not, because there are name collisions between them,
although this restriction may be lifted in the future.
Notes
-----
.. versionchanged:: 1.3
Support for common dict operations (update, keys, values)
.. versionchanged:: 1.6
Atomicity guarantee
.. versionchanged:: 1.6
`**kwargs` support
Examples
--------
>>> e = { 1: 10, 9: 5 }
>>> d.update(e)
>>> l = [ (105, 11), (169, 4) ]
>>> d.update(l)
>>> d.update(e, iline=189, xline=193, hour=5)
>>> d.update(sx=7)
"""
if len(args) > 1:
msg = 'update expected at most 1 non-keyword argument, got {}'
raise TypeError(msg.format(len(args)))
buf = bytearray(self.buf)
# Implementation largely borrowed from collections.mapping
# If E present and has a .keys() method: for k in E: D[k] = E[k]
# If E present and lacks .keys() method: for (k, v) in E: D[k] = v
# In either case, this is followed by: for k, v in F.items(): D[k] = v
if len(args) == 1:
other = args[0]
if isinstance(other, collections.abc.Mapping):
for key in other:
self.putfield(buf, int(key), other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.putfield(buf, int(key), other[key])
else:
for key, value in other:
self.putfield(buf, int(key), value)
for key, value in kwargs.items():
self.putfield(buf, int(self._kwargs[key]), value)
self.buf = buf
self.flush()
|
def function[update, parameter[self]]:
constant[d.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
Overwrite the values in `d` with the key-value pairs from `E` and `F`.
If any key in `E` or `F` is invalid in `d`, ``KeyError`` is raised.
This method is atomic - either all given values are set in `d`, or
none are. ``update`` does not commit a partially-updated version to
disk.
For kwargs, Seismic Unix-style names are supported. `BinField` and
`TraceField` are not, because there are name collisions between them,
although this restriction may be lifted in the future.
Notes
-----
.. versionchanged:: 1.3
Support for common dict operations (update, keys, values)
.. versionchanged:: 1.6
Atomicity guarantee
.. versionchanged:: 1.6
`**kwargs` support
Examples
--------
>>> e = { 1: 10, 9: 5 }
>>> d.update(e)
>>> l = [ (105, 11), (169, 4) ]
>>> d.update(l)
>>> d.update(e, iline=189, xline=193, hour=5)
>>> d.update(sx=7)
]
if compare[call[name[len], parameter[name[args]]] greater[>] constant[1]] begin[:]
variable[msg] assign[=] constant[update expected at most 1 non-keyword argument, got {}]
<ast.Raise object at 0x7da2101f5060>
variable[buf] assign[=] call[name[bytearray], parameter[name[self].buf]]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:]
variable[other] assign[=] call[name[args]][constant[0]]
if call[name[isinstance], parameter[name[other], name[collections].abc.Mapping]] begin[:]
for taget[name[key]] in starred[name[other]] begin[:]
call[name[self].putfield, parameter[name[buf], call[name[int], parameter[name[key]]], call[name[other]][name[key]]]]
for taget[tuple[[<ast.Name object at 0x7da1b17b4b80>, <ast.Name object at 0x7da1b17b7970>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
call[name[self].putfield, parameter[name[buf], call[name[int], parameter[call[name[self]._kwargs][name[key]]]], name[value]]]
name[self].buf assign[=] name[buf]
call[name[self].flush, parameter[]]
|
keyword[def] identifier[update] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )> literal[int] :
identifier[msg] = literal[string]
keyword[raise] identifier[TypeError] ( identifier[msg] . identifier[format] ( identifier[len] ( identifier[args] )))
identifier[buf] = identifier[bytearray] ( identifier[self] . identifier[buf] )
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[other] = identifier[args] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[other] , identifier[collections] . identifier[abc] . identifier[Mapping] ):
keyword[for] identifier[key] keyword[in] identifier[other] :
identifier[self] . identifier[putfield] ( identifier[buf] , identifier[int] ( identifier[key] ), identifier[other] [ identifier[key] ])
keyword[elif] identifier[hasattr] ( identifier[other] , literal[string] ):
keyword[for] identifier[key] keyword[in] identifier[other] . identifier[keys] ():
identifier[self] . identifier[putfield] ( identifier[buf] , identifier[int] ( identifier[key] ), identifier[other] [ identifier[key] ])
keyword[else] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[other] :
identifier[self] . identifier[putfield] ( identifier[buf] , identifier[int] ( identifier[key] ), identifier[value] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] ():
identifier[self] . identifier[putfield] ( identifier[buf] , identifier[int] ( identifier[self] . identifier[_kwargs] [ identifier[key] ]), identifier[value] )
identifier[self] . identifier[buf] = identifier[buf]
identifier[self] . identifier[flush] ()
|
def update(self, *args, **kwargs):
"""d.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
Overwrite the values in `d` with the key-value pairs from `E` and `F`.
If any key in `E` or `F` is invalid in `d`, ``KeyError`` is raised.
This method is atomic - either all given values are set in `d`, or
none are. ``update`` does not commit a partially-updated version to
disk.
For kwargs, Seismic Unix-style names are supported. `BinField` and
`TraceField` are not, because there are name collisions between them,
although this restriction may be lifted in the future.
Notes
-----
.. versionchanged:: 1.3
Support for common dict operations (update, keys, values)
.. versionchanged:: 1.6
Atomicity guarantee
.. versionchanged:: 1.6
`**kwargs` support
Examples
--------
>>> e = { 1: 10, 9: 5 }
>>> d.update(e)
>>> l = [ (105, 11), (169, 4) ]
>>> d.update(l)
>>> d.update(e, iline=189, xline=193, hour=5)
>>> d.update(sx=7)
"""
if len(args) > 1:
msg = 'update expected at most 1 non-keyword argument, got {}'
raise TypeError(msg.format(len(args))) # depends on [control=['if'], data=[]]
buf = bytearray(self.buf)
# Implementation largely borrowed from collections.mapping
# If E present and has a .keys() method: for k in E: D[k] = E[k]
# If E present and lacks .keys() method: for (k, v) in E: D[k] = v
# In either case, this is followed by: for k, v in F.items(): D[k] = v
if len(args) == 1:
other = args[0]
if isinstance(other, collections.abc.Mapping):
for key in other:
self.putfield(buf, int(key), other[key]) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
elif hasattr(other, 'keys'):
for key in other.keys():
self.putfield(buf, int(key), other[key]) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
else:
for (key, value) in other:
self.putfield(buf, int(key), value) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
for (key, value) in kwargs.items():
self.putfield(buf, int(self._kwargs[key]), value) # depends on [control=['for'], data=[]]
self.buf = buf
self.flush()
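A short sketch of the call forms from the docstring above, assuming a segyio file opened read-write; the file name and all values are placeholders.

import segyio

with segyio.open('shots.sgy', 'r+') as f:         # placeholder file name
    d = f.header[0]                               # header of the first trace
    d.update({segyio.TraceField.CDP: 7})          # mapping form
    d.update([(segyio.TraceField.offset, 100)])   # iterable of (key, value)
    d.update(sx=7, sy=9)                          # Seismic Unix-style keywords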
|
def check_keystore_json(jsondata: Dict) -> bool:
""" Check if ``jsondata`` has the structure of a keystore file version 3.
Note that this test is not complete, e.g. it doesn't check key derivation or cipher parameters.
Copied from https://github.com/vbuterin/pybitcointools
Args:
jsondata: Dictionary containing the data from the json file
Returns:
`True` if the data appears to be valid, otherwise `False`
"""
if 'crypto' not in jsondata and 'Crypto' not in jsondata:
return False
if 'version' not in jsondata:
return False
if jsondata['version'] != 3:
return False
crypto = jsondata.get('crypto', jsondata.get('Crypto'))
if 'cipher' not in crypto:
return False
if 'ciphertext' not in crypto:
return False
if 'kdf' not in crypto:
return False
if 'mac' not in crypto:
return False
return True
|
def function[check_keystore_json, parameter[jsondata]]:
constant[ Check if ``jsondata`` has the structure of a keystore file version 3.
Note that this test is not complete, e.g. it doesn't check key derivation or cipher parameters.
Copied from https://github.com/vbuterin/pybitcointools
Args:
jsondata: Dictionary containing the data from the json file
Returns:
`True` if the data appears to be valid, otherwise `False`
]
if <ast.BoolOp object at 0x7da1b1907370> begin[:]
return[constant[False]]
if compare[constant[version] <ast.NotIn object at 0x7da2590d7190> name[jsondata]] begin[:]
return[constant[False]]
if compare[call[name[jsondata]][constant[version]] not_equal[!=] constant[3]] begin[:]
return[constant[False]]
variable[crypto] assign[=] call[name[jsondata].get, parameter[constant[crypto], call[name[jsondata].get, parameter[constant[Crypto]]]]]
if compare[constant[cipher] <ast.NotIn object at 0x7da2590d7190> name[crypto]] begin[:]
return[constant[False]]
if compare[constant[ciphertext] <ast.NotIn object at 0x7da2590d7190> name[crypto]] begin[:]
return[constant[False]]
if compare[constant[kdf] <ast.NotIn object at 0x7da2590d7190> name[crypto]] begin[:]
return[constant[False]]
if compare[constant[mac] <ast.NotIn object at 0x7da2590d7190> name[crypto]] begin[:]
return[constant[False]]
return[constant[True]]
|
keyword[def] identifier[check_keystore_json] ( identifier[jsondata] : identifier[Dict] )-> identifier[bool] :
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[jsondata] keyword[and] literal[string] keyword[not] keyword[in] identifier[jsondata] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[jsondata] :
keyword[return] keyword[False]
keyword[if] identifier[jsondata] [ literal[string] ]!= literal[int] :
keyword[return] keyword[False]
identifier[crypto] = identifier[jsondata] . identifier[get] ( literal[string] , identifier[jsondata] . identifier[get] ( literal[string] ))
keyword[if] literal[string] keyword[not] keyword[in] identifier[crypto] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[crypto] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[crypto] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[crypto] :
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def check_keystore_json(jsondata: Dict) -> bool:
""" Check if ``jsondata`` has the structure of a keystore file version 3.
Note that this test is not complete, e.g. it doesn't check key derivation or cipher parameters.
Copied from https://github.com/vbuterin/pybitcointools
Args:
jsondata: Dictionary containing the data from the json file
Returns:
`True` if the data appears to be valid, otherwise `False`
"""
if 'crypto' not in jsondata and 'Crypto' not in jsondata:
return False # depends on [control=['if'], data=[]]
if 'version' not in jsondata:
return False # depends on [control=['if'], data=[]]
if jsondata['version'] != 3:
return False # depends on [control=['if'], data=[]]
crypto = jsondata.get('crypto', jsondata.get('Crypto'))
if 'cipher' not in crypto:
return False # depends on [control=['if'], data=[]]
if 'ciphertext' not in crypto:
return False # depends on [control=['if'], data=[]]
if 'kdf' not in crypto:
return False # depends on [control=['if'], data=[]]
if 'mac' not in crypto:
return False # depends on [control=['if'], data=[]]
return True
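A minimal sketch exercising `check_keystore_json`; the crypto values are dummies chosen only to satisfy the structural checks, and would not decrypt anything.

keystore = {
    'version': 3,
    'crypto': {
        'cipher': 'aes-128-ctr',   # dummy parameters; only the dict
        'ciphertext': '00' * 32,   # shape matters to this validator
        'kdf': 'scrypt',
        'mac': '00' * 32,
    },
}
assert check_keystore_json(keystore) is True
assert check_keystore_json({'version': 3}) is False  # missing crypto section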
|
def read_extended_header(self, groupby='field', force_type=''):
"""Read the extended header according to `extended_header_type`.
Currently, only the FEI extended header format is supported.
See `print_fei_ext_header_spec` or `this homepage`_ for the format
specification.
The extended header usually has one header section per
image (slice); in the case of the FEI header these are 128 bytes
each, with a total of 1024 sections.
Parameters
----------
groupby : {'field', 'section'}, optional
How to group the values in the extended header sections.
``'field'`` : make an array per section field, e.g.::
'defocus': [dval1, dval2, ..., dval1024],
'exp_time': [tval1, tval2, ..., tval1024],
...
``'section'`` : make a dictionary for each section, e.g.::
{'defocus': dval1, 'exp_time': tval1},
{'defocus': dval2, 'exp_time': tval2},
...
If the number of images is smaller than 1024, the last values are
all set to zero.
force_type : string, optional
If given, this value overrides the `extended_header_type`
from `header`.
Currently supported: ``'FEI1'``
Returns
-------
ext_header: `OrderedDict` or tuple
For ``groupby == 'field'``, a dictionary with the field names
as keys, like in the example.
For ``groupby == 'section'``, a tuple of dictionaries as
shown above.
The returned data structures store no offsets, in contrast
to the regular header.
References
----------
.. _this homepage:
http://www.2dx.unibas.ch/documentation/mrc-software/fei-\
extended-mrc-format-not-used-by-2dx
"""
ext_header_type = str(force_type).upper() or self.extended_header_type
if ext_header_type != 'FEI1':
raise ValueError("extended header type '{}' not supported"
"".format(self.extended_header_type))
groupby, groupby_in = str(groupby).lower(), groupby
ext_header_len = int(self.header['nsymbt']['value'])
if ext_header_len % MRC_FEI_SECTION_SIZE:
raise ValueError('extended header length {} from header is '
'not divisible by extended header section size '
'{}'.format(ext_header_len, MRC_FEI_SECTION_SIZE))
num_sections = ext_header_len // MRC_FEI_SECTION_SIZE
if num_sections != MRC_FEI_NUM_SECTIONS:
raise ValueError('calculated number of sections ({}) not equal to '
'expected number of sections ({})'
''.format(num_sections, MRC_FEI_NUM_SECTIONS))
section_fields = header_fields_from_table(
MRC_FEI_EXT_HEADER_SECTION, keys=MRC_SPEC_KEYS,
dtype_map=MRC_DTYPE_TO_NPY_DTYPE)
# Make a list for each field and append the values for that
# field. Then create an array from that list and store it
# under the field name.
ext_header = OrderedDict()
for field in section_fields:
value_list = []
field_offset = field['offset']
field_dtype = field['dtype']
field_dshape = field['dshape']
# Compute some parameters
num_items = int(np.prod(field_dshape))
size_bytes = num_items * field_dtype.itemsize
fmt = '{}{}'.format(num_items, field_dtype.char)
for section in range(num_sections):
# Get the bytestring from the right position in the file,
# unpack it and append the value to the list.
start = section * MRC_FEI_SECTION_SIZE + field_offset
self.file.seek(start)
packed_value = self.file.read(size_bytes)
value_list.append(struct.unpack(fmt, packed_value))
ext_header[field['name']] = np.array(value_list, dtype=field_dtype)
if groupby == 'field':
return ext_header
elif groupby == 'section':
# Transpose the data and return as tuple.
return tuple({key: ext_header[key][i] for key in ext_header}
for i in range(num_sections))
else:
raise ValueError("`groupby` '{}' not understood"
"".format(groupby_in))
|
def function[read_extended_header, parameter[self, groupby, force_type]]:
constant[Read the extended header according to `extended_header_type`.
Currently, only the FEI extended header format is supported.
See `print_fei_ext_header_spec` or `this homepage`_ for the format
specification.
The extended header usually has one header section per
image (slice); in the case of the FEI header these are 128 bytes
each, with a total of 1024 sections.
Parameters
----------
groupby : {'field', 'section'}, optional
How to group the values in the extended header sections.
``'field'`` : make an array per section field, e.g.::
'defocus': [dval1, dval2, ..., dval1024],
'exp_time': [tval1, tval2, ..., tval1024],
...
``'section'`` : make a dictionary for each section, e.g.::
{'defocus': dval1, 'exp_time': tval1},
{'defocus': dval2, 'exp_time': tval2},
...
If the number of images is smaller than 1024, the last values are
all set to zero.
force_type : string, optional
If given, this value overrides the `extended_header_type`
from `header`.
Currently supported: ``'FEI1'``
Returns
-------
ext_header: `OrderedDict` or tuple
For ``groupby == 'field'``, a dictionary with the field names
as keys, like in the example.
For ``groupby == 'section'``, a tuple of dictionaries as
shown above.
The returned data structures store no offsets, in contrast
to the regular header.
See Also
--------
References
----------
.. _this homepage:
http://www.2dx.unibas.ch/documentation/mrc-software/fei-extended-mrc-format-not-used-by-2dx
]
variable[ext_header_type] assign[=] <ast.BoolOp object at 0x7da1b1e795a0>
if compare[name[ext_header_type] not_equal[!=] constant[FEI1]] begin[:]
<ast.Raise object at 0x7da1b1e7abc0>
<ast.Tuple object at 0x7da18f58dde0> assign[=] tuple[[<ast.Call object at 0x7da18f58ed40>, <ast.Name object at 0x7da18f58d3c0>]]
variable[ext_header_len] assign[=] call[name[int], parameter[call[call[name[self].header][constant[nsymbt]]][constant[value]]]]
if binary_operation[name[ext_header_len] <ast.Mod object at 0x7da2590d6920> name[MRC_FEI_SECTION_SIZE]] begin[:]
<ast.Raise object at 0x7da18f58fb20>
variable[num_sections] assign[=] binary_operation[name[ext_header_len] <ast.FloorDiv object at 0x7da2590d6bc0> name[MRC_FEI_SECTION_SIZE]]
if compare[name[num_sections] not_equal[!=] name[MRC_FEI_NUM_SECTIONS]] begin[:]
<ast.Raise object at 0x7da18f58c1f0>
variable[section_fields] assign[=] call[name[header_fields_from_table], parameter[name[MRC_FEI_EXT_HEADER_SECTION]]]
variable[ext_header] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[field]] in starred[name[section_fields]] begin[:]
variable[value_list] assign[=] list[[]]
variable[field_offset] assign[=] call[name[field]][constant[offset]]
variable[field_dtype] assign[=] call[name[field]][constant[dtype]]
variable[field_dshape] assign[=] call[name[field]][constant[dshape]]
variable[num_items] assign[=] call[name[int], parameter[call[name[np].prod, parameter[name[field_dshape]]]]]
variable[size_bytes] assign[=] binary_operation[name[num_items] * name[field_dtype].itemsize]
variable[fmt] assign[=] call[constant[{}{}].format, parameter[name[num_items], name[field_dtype].char]]
for taget[name[section]] in starred[call[name[range], parameter[name[num_sections]]]] begin[:]
variable[start] assign[=] binary_operation[binary_operation[name[section] * name[MRC_FEI_SECTION_SIZE]] + name[field_offset]]
call[name[self].file.seek, parameter[name[start]]]
variable[packed_value] assign[=] call[name[self].file.read, parameter[name[size_bytes]]]
call[name[value_list].append, parameter[call[name[struct].unpack, parameter[name[fmt], name[packed_value]]]]]
call[name[ext_header]][call[name[field]][constant[name]]] assign[=] call[name[np].array, parameter[name[value_list]]]
if compare[name[groupby] equal[==] constant[field]] begin[:]
return[name[ext_header]]
|
keyword[def] identifier[read_extended_header] ( identifier[self] , identifier[groupby] = literal[string] , identifier[force_type] = literal[string] ):
literal[string]
identifier[ext_header_type] = identifier[str] ( identifier[force_type] ). identifier[upper] () keyword[or] identifier[self] . identifier[extended_header_type]
keyword[if] identifier[ext_header_type] != literal[string] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[extended_header_type] ))
identifier[groupby] , identifier[groupby_in] = identifier[str] ( identifier[groupby] ). identifier[lower] (), identifier[groupby]
identifier[ext_header_len] = identifier[int] ( identifier[self] . identifier[header] [ literal[string] ][ literal[string] ])
keyword[if] identifier[ext_header_len] % identifier[MRC_FEI_SECTION_SIZE] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[ext_header_len] , identifier[MRC_FEI_SECTION_SIZE] ))
identifier[num_sections] = identifier[ext_header_len] // identifier[MRC_FEI_SECTION_SIZE]
keyword[if] identifier[num_sections] != identifier[MRC_FEI_NUM_SECTIONS] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[num_sections] , identifier[MRC_FEI_NUM_SECTIONS] ))
identifier[section_fields] = identifier[header_fields_from_table] (
identifier[MRC_FEI_EXT_HEADER_SECTION] , identifier[keys] = identifier[MRC_SPEC_KEYS] ,
identifier[dtype_map] = identifier[MRC_DTYPE_TO_NPY_DTYPE] )
identifier[ext_header] = identifier[OrderedDict] ()
keyword[for] identifier[field] keyword[in] identifier[section_fields] :
identifier[value_list] =[]
identifier[field_offset] = identifier[field] [ literal[string] ]
identifier[field_dtype] = identifier[field] [ literal[string] ]
identifier[field_dshape] = identifier[field] [ literal[string] ]
identifier[num_items] = identifier[int] ( identifier[np] . identifier[prod] ( identifier[field_dshape] ))
identifier[size_bytes] = identifier[num_items] * identifier[field_dtype] . identifier[itemsize]
identifier[fmt] = literal[string] . identifier[format] ( identifier[num_items] , identifier[field_dtype] . identifier[char] )
keyword[for] identifier[section] keyword[in] identifier[range] ( identifier[num_sections] ):
identifier[start] = identifier[section] * identifier[MRC_FEI_SECTION_SIZE] + identifier[field_offset]
identifier[self] . identifier[file] . identifier[seek] ( identifier[start] )
identifier[packed_value] = identifier[self] . identifier[file] . identifier[read] ( identifier[size_bytes] )
identifier[value_list] . identifier[append] ( identifier[struct] . identifier[unpack] ( identifier[fmt] , identifier[packed_value] ))
identifier[ext_header] [ identifier[field] [ literal[string] ]]= identifier[np] . identifier[array] ( identifier[value_list] , identifier[dtype] = identifier[field_dtype] )
keyword[if] identifier[groupby] == literal[string] :
keyword[return] identifier[ext_header]
keyword[elif] identifier[groupby] == literal[string] :
keyword[return] identifier[tuple] ({ identifier[key] : identifier[ext_header] [ identifier[key] ][ identifier[i] ] keyword[for] identifier[key] keyword[in] identifier[ext_header] }
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_sections] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[groupby_in] ))
|
def read_extended_header(self, groupby='field', force_type=''):
"""Read the extended header according to `extended_header_type`.
Currently, only the FEI extended header format is supported.
See `print_fei_ext_header_spec` or `this homepage`_ for the format
specification.
The extended header usually has one header section per
image (slice), in case of the FEI header 128 bytes each, with
a total of 1024 sections.
Parameters
----------
groupby : {'field', 'section'}, optional
How to group the values in the extended header sections.
``'field'`` : make an array per section field, e.g.::
'defocus': [dval1, dval2, ..., dval1024],
'exp_time': [tval1, tval2, ..., tval1024],
...
``'section'`` : make a dictionary for each section, e.g.::
{'defocus': dval1, 'exp_time': tval1},
{'defocus': dval2, 'exp_time': tval2},
...
If the number of images is smaller than 1024, the last values are
all set to zero.
force_type : string, optional
If given, this value overrides the `extended_header_type`
from `header`.
Currently supported: ``'FEI1'``
Returns
-------
ext_header: `OrderedDict` or tuple
For ``groupby == 'field'``, a dictionary with the field names
as keys, like in the example.
For ``groupby == 'section'``, a tuple of dictionaries as
shown above.
The returned data structures store no offsets, in contrast
to the regular header.
See Also
--------
References
----------
.. _this homepage:
http://www.2dx.unibas.ch/documentation/mrc-software/fei-extended-mrc-format-not-used-by-2dx
"""
ext_header_type = str(force_type).upper() or self.extended_header_type
if ext_header_type != 'FEI1':
raise ValueError("extended header type '{}' not supported".format(self.extended_header_type)) # depends on [control=['if'], data=[]]
(groupby, groupby_in) = (str(groupby).lower(), groupby)
ext_header_len = int(self.header['nsymbt']['value'])
if ext_header_len % MRC_FEI_SECTION_SIZE:
raise ValueError('extended header length {} from header is not divisible by extended header section size {}'.format(ext_header_len, MRC_FEI_SECTION_SIZE)) # depends on [control=['if'], data=[]]
num_sections = ext_header_len // MRC_FEI_SECTION_SIZE
if num_sections != MRC_FEI_NUM_SECTIONS:
raise ValueError('calculated number of sections ({}) not equal to expected number of sections ({})'.format(num_sections, MRC_FEI_NUM_SECTIONS)) # depends on [control=['if'], data=['num_sections', 'MRC_FEI_NUM_SECTIONS']]
section_fields = header_fields_from_table(MRC_FEI_EXT_HEADER_SECTION, keys=MRC_SPEC_KEYS, dtype_map=MRC_DTYPE_TO_NPY_DTYPE)
# Make a list for each field and append the values for that
# field. Then create an array from that list and store it
# under the field name.
ext_header = OrderedDict()
for field in section_fields:
value_list = []
field_offset = field['offset']
field_dtype = field['dtype']
field_dshape = field['dshape']
# Compute some parameters
num_items = int(np.prod(field_dshape))
size_bytes = num_items * field_dtype.itemsize
fmt = '{}{}'.format(num_items, field_dtype.char)
for section in range(num_sections):
# Get the bytestring from the right position in the file,
# unpack it and append the value to the list.
start = section * MRC_FEI_SECTION_SIZE + field_offset
self.file.seek(start)
packed_value = self.file.read(size_bytes)
value_list.append(struct.unpack(fmt, packed_value)) # depends on [control=['for'], data=['section']]
ext_header[field['name']] = np.array(value_list, dtype=field_dtype) # depends on [control=['for'], data=['field']]
if groupby == 'field':
return ext_header # depends on [control=['if'], data=[]]
elif groupby == 'section':
# Transpose the data and return as tuple.
return tuple(({key: ext_header[key][i] for key in ext_header} for i in range(num_sections))) # depends on [control=['if'], data=[]]
else:
raise ValueError("`groupby` '{}' not understood".format(groupby_in))
|
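A brief usage sketch for read_extended_header above. Here `reader` is assumed to be an already-constructed instance of the MRC reader class that owns the method (construction omitted); the field names mirror the docstring's own examples.

# `reader`: assumed instance of the reader class above (construction omitted).
by_field = reader.read_extended_header(groupby='field', force_type='FEI1')
# One array per section field, 1024 entries each:
print(len(by_field['defocus']))  # -> 1024

by_section = reader.read_extended_header(groupby='section', force_type='FEI1')
# A tuple of 1024 per-image dicts:
print(by_section[0]['defocus'], by_section[0]['exp_time'])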
def add_codedValue(self, name, code):
""" adds a value to the coded value list """
if self._codedValues is None:
self._codedValues = []
self._codedValues.append(
{"name": name, "code": code}
)
|
def function[add_codedValue, parameter[self, name, code]]:
constant[ adds a value to the coded value list ]
if compare[name[self]._codedValues is constant[None]] begin[:]
name[self]._codedValues assign[=] list[[]]
call[name[self]._codedValues.append, parameter[dictionary[[<ast.Constant object at 0x7da18dc98f70>, <ast.Constant object at 0x7da18dc9b0a0>], [<ast.Name object at 0x7da18dc98310>, <ast.Name object at 0x7da18dc99ed0>]]]]
|
keyword[def] identifier[add_codedValue] ( identifier[self] , identifier[name] , identifier[code] ):
literal[string]
keyword[if] identifier[self] . identifier[_codedValues] keyword[is] keyword[None] :
identifier[self] . identifier[_codedValues] =[]
identifier[self] . identifier[_codedValues] . identifier[append] (
{ literal[string] : identifier[name] , literal[string] : identifier[code] }
)
|
def add_codedValue(self, name, code):
""" adds a value to the coded value list """
if self._codedValues is None:
self._codedValues = [] # depends on [control=['if'], data=[]]
self._codedValues.append({'name': name, 'code': code})
|
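A self-contained usage sketch for add_codedValue; the Domain host class is a stand-in (an assumption), since any object carrying a `_codedValues` attribute and this method behaves the same way.

class Domain:
    def __init__(self):
        self._codedValues = None  # starts unset, created lazily

    def add_codedValue(self, name, code):
        """ adds a value to the coded value list """
        if self._codedValues is None:
            self._codedValues = []
        self._codedValues.append({"name": name, "code": code})

d = Domain()
d.add_codedValue("Residential", 1)
d.add_codedValue("Commercial", 2)
print(d._codedValues)
# [{'name': 'Residential', 'code': 1}, {'name': 'Commercial', 'code': 2}]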
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
LOG.debug(
"OplogThread: Initiating rollback sequence to bring "
"system into a consistent state."
)
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(
last_docs, key=lambda x: x["_ts"] if x else float("-inf")
)
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc["_ts"])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{"ts": {"$lte": target_ts}, "op": {"$ne": "n"}},
sort=[("$natural", pymongo.DESCENDING)],
)
LOG.debug("OplogThread: last oplog entry is %s" % str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry["ts"]
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc["_ts"]
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc["ns"] in rollback_set:
rollback_set[doc["ns"]].append(doc)
else:
rollback_set[doc["ns"]] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = self.namespace_config.unmap_namespace(namespace)
if not original_namespace:
original_namespace = namespace
database, coll = original_namespace.split(".", 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc["_id"]) for doc in doc_list]
# Use connection to whole cluster if in sharded environment.
client = self.mongos_client or self.primary_client
to_update = util.retry_until_ok(
client[database][coll].find,
{"_id": {"$in": bson_obj_id_list}},
projection=self.namespace_config.projection(original_namespace),
)
            # doc_list holds the docs in the target system; to_update
            # holds the corresponding docs in MongoDB.
doc_hash = {} # Hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc["_id"])] = doc
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc["_id"] in doc_hash:
del doc_hash[doc["_id"]]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
# Delete the inconsistent documents
LOG.debug("OplogThread: Rollback, removing inconsistent " "docs.")
remov_inc = 0
for document_id in doc_hash:
try:
dm.remove(
document_id,
namespace,
util.bson_ts_to_long(rollback_cutoff_ts),
)
remov_inc += 1
LOG.debug("OplogThread: Rollback, removed %r " % doc)
except errors.OperationFailed:
LOG.warning(
"Could not delete document during rollback: %r "
"This can happen if this document was already "
"removed by another rollback happening at the "
"same time." % doc
)
LOG.debug("OplogThread: Rollback, removed %d docs." % remov_inc)
# Insert the ones from mongo
LOG.debug("OplogThread: Rollback, inserting documents " "from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
                try:
                    dm.upsert(
                        doc, namespace, util.bson_ts_to_long(rollback_cutoff_ts)
                    )
                    insert_inc += 1
except errors.OperationFailed:
fail_insert_inc += 1
LOG.exception(
"OplogThread: Rollback, Unable to " "insert %r" % doc
)
LOG.debug(
"OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts))
)
return rollback_cutoff_ts
|
def function[rollback, parameter[self]]:
constant[Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
]
call[name[LOG].debug, parameter[constant[OplogThread: Initiating rollback sequence to bring system into a consistent state.]]]
variable[last_docs] assign[=] list[[]]
for taget[name[dm]] in starred[name[self].doc_managers] begin[:]
call[name[dm].commit, parameter[]]
call[name[last_docs].append, parameter[call[name[dm].get_last_doc, parameter[]]]]
variable[last_inserted_doc] assign[=] call[name[max], parameter[name[last_docs]]]
if compare[name[last_inserted_doc] is constant[None]] begin[:]
return[constant[None]]
variable[target_ts] assign[=] call[name[util].long_to_bson_ts, parameter[call[name[last_inserted_doc]][constant[_ts]]]]
variable[last_oplog_entry] assign[=] call[name[util].retry_until_ok, parameter[name[self].oplog.find_one, dictionary[[<ast.Constant object at 0x7da1b1d38220>, <ast.Constant object at 0x7da1b1e67100>], [<ast.Dict object at 0x7da1b1e665c0>, <ast.Dict object at 0x7da1b1e65150>]]]]
call[name[LOG].debug, parameter[binary_operation[constant[OplogThread: last oplog entry is %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[last_oplog_entry]]]]]]
if compare[name[last_oplog_entry] is constant[None]] begin[:]
return[constant[None]]
variable[rollback_cutoff_ts] assign[=] call[name[last_oplog_entry]][constant[ts]]
variable[start_ts] assign[=] call[name[util].bson_ts_to_long, parameter[name[rollback_cutoff_ts]]]
variable[end_ts] assign[=] call[name[last_inserted_doc]][constant[_ts]]
for taget[name[dm]] in starred[name[self].doc_managers] begin[:]
variable[rollback_set] assign[=] dictionary[[], []]
for taget[name[doc]] in starred[call[name[dm].search, parameter[name[start_ts], name[end_ts]]]] begin[:]
if compare[call[name[doc]][constant[ns]] in name[rollback_set]] begin[:]
call[call[name[rollback_set]][call[name[doc]][constant[ns]]].append, parameter[name[doc]]]
for taget[tuple[[<ast.Name object at 0x7da1b1e66b60>, <ast.Name object at 0x7da1b1e64340>]]] in starred[call[name[rollback_set].items, parameter[]]] begin[:]
variable[original_namespace] assign[=] call[name[self].namespace_config.unmap_namespace, parameter[name[namespace]]]
if <ast.UnaryOp object at 0x7da1b1e66170> begin[:]
variable[original_namespace] assign[=] name[namespace]
<ast.Tuple object at 0x7da1b1e64ee0> assign[=] call[name[original_namespace].split, parameter[constant[.], constant[1]]]
variable[obj_id] assign[=] name[bson].objectid.ObjectId
variable[bson_obj_id_list] assign[=] <ast.ListComp object at 0x7da1b1e94f70>
variable[client] assign[=] <ast.BoolOp object at 0x7da1b1e95090>
variable[to_update] assign[=] call[name[util].retry_until_ok, parameter[call[call[name[client]][name[database]]][name[coll]].find, dictionary[[<ast.Constant object at 0x7da1b1e94460>], [<ast.Dict object at 0x7da1b1e94670>]]]]
variable[doc_hash] assign[=] dictionary[[], []]
for taget[name[doc]] in starred[name[doc_list]] begin[:]
call[name[doc_hash]][call[name[bson].objectid.ObjectId, parameter[call[name[doc]][constant[_id]]]]] assign[=] name[doc]
variable[to_index] assign[=] list[[]]
def function[collect_existing_docs, parameter[]]:
for taget[name[doc]] in starred[name[to_update]] begin[:]
if compare[call[name[doc]][constant[_id]] in name[doc_hash]] begin[:]
<ast.Delete object at 0x7da1b1e7d1b0>
call[name[to_index].append, parameter[name[doc]]]
call[name[retry_until_ok], parameter[name[collect_existing_docs]]]
call[name[LOG].debug, parameter[constant[OplogThread: Rollback, removing inconsistent docs.]]]
variable[remov_inc] assign[=] constant[0]
for taget[name[document_id]] in starred[name[doc_hash]] begin[:]
<ast.Try object at 0x7da1b1e7d5a0>
call[name[LOG].debug, parameter[binary_operation[constant[OplogThread: Rollback, removed %d docs.] <ast.Mod object at 0x7da2590d6920> name[remov_inc]]]]
call[name[LOG].debug, parameter[constant[OplogThread: Rollback, inserting documents from mongo.]]]
variable[insert_inc] assign[=] constant[0]
variable[fail_insert_inc] assign[=] constant[0]
for taget[name[doc]] in starred[name[to_index]] begin[:]
<ast.Try object at 0x7da1b1e7cb80>
call[name[LOG].debug, parameter[binary_operation[constant[OplogThread: Rollback, Successfully inserted %d documents and failed to insert %d documents. Returning a rollback cutoff time of %s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1e30310>, <ast.Name object at 0x7da1b1e30220>, <ast.Call object at 0x7da1b1e30130>]]]]]
return[name[rollback_cutoff_ts]]
|
keyword[def] identifier[rollback] ( identifier[self] ):
literal[string]
identifier[LOG] . identifier[debug] (
literal[string]
literal[string]
)
identifier[last_docs] =[]
keyword[for] identifier[dm] keyword[in] identifier[self] . identifier[doc_managers] :
identifier[dm] . identifier[commit] ()
identifier[last_docs] . identifier[append] ( identifier[dm] . identifier[get_last_doc] ())
identifier[last_inserted_doc] = identifier[max] (
identifier[last_docs] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[string] ] keyword[if] identifier[x] keyword[else] identifier[float] ( literal[string] )
)
keyword[if] identifier[last_inserted_doc] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[target_ts] = identifier[util] . identifier[long_to_bson_ts] ( identifier[last_inserted_doc] [ literal[string] ])
identifier[last_oplog_entry] = identifier[util] . identifier[retry_until_ok] (
identifier[self] . identifier[oplog] . identifier[find_one] ,
{ literal[string] :{ literal[string] : identifier[target_ts] }, literal[string] :{ literal[string] : literal[string] }},
identifier[sort] =[( literal[string] , identifier[pymongo] . identifier[DESCENDING] )],
)
identifier[LOG] . identifier[debug] ( literal[string] % identifier[str] ( identifier[last_oplog_entry] ))
keyword[if] identifier[last_oplog_entry] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[rollback_cutoff_ts] = identifier[last_oplog_entry] [ literal[string] ]
identifier[start_ts] = identifier[util] . identifier[bson_ts_to_long] ( identifier[rollback_cutoff_ts] )
identifier[end_ts] = identifier[last_inserted_doc] [ literal[string] ]
keyword[for] identifier[dm] keyword[in] identifier[self] . identifier[doc_managers] :
identifier[rollback_set] ={}
keyword[for] identifier[doc] keyword[in] identifier[dm] . identifier[search] ( identifier[start_ts] , identifier[end_ts] ):
keyword[if] identifier[doc] [ literal[string] ] keyword[in] identifier[rollback_set] :
identifier[rollback_set] [ identifier[doc] [ literal[string] ]]. identifier[append] ( identifier[doc] )
keyword[else] :
identifier[rollback_set] [ identifier[doc] [ literal[string] ]]=[ identifier[doc] ]
keyword[for] identifier[namespace] , identifier[doc_list] keyword[in] identifier[rollback_set] . identifier[items] ():
identifier[original_namespace] = identifier[self] . identifier[namespace_config] . identifier[unmap_namespace] ( identifier[namespace] )
keyword[if] keyword[not] identifier[original_namespace] :
identifier[original_namespace] = identifier[namespace]
identifier[database] , identifier[coll] = identifier[original_namespace] . identifier[split] ( literal[string] , literal[int] )
identifier[obj_id] = identifier[bson] . identifier[objectid] . identifier[ObjectId]
identifier[bson_obj_id_list] =[ identifier[obj_id] ( identifier[doc] [ literal[string] ]) keyword[for] identifier[doc] keyword[in] identifier[doc_list] ]
identifier[client] = identifier[self] . identifier[mongos_client] keyword[or] identifier[self] . identifier[primary_client]
identifier[to_update] = identifier[util] . identifier[retry_until_ok] (
identifier[client] [ identifier[database] ][ identifier[coll] ]. identifier[find] ,
{ literal[string] :{ literal[string] : identifier[bson_obj_id_list] }},
identifier[projection] = identifier[self] . identifier[namespace_config] . identifier[projection] ( identifier[original_namespace] ),
)
identifier[doc_hash] ={}
keyword[for] identifier[doc] keyword[in] identifier[doc_list] :
identifier[doc_hash] [ identifier[bson] . identifier[objectid] . identifier[ObjectId] ( identifier[doc] [ literal[string] ])]= identifier[doc]
identifier[to_index] =[]
keyword[def] identifier[collect_existing_docs] ():
keyword[for] identifier[doc] keyword[in] identifier[to_update] :
keyword[if] identifier[doc] [ literal[string] ] keyword[in] identifier[doc_hash] :
keyword[del] identifier[doc_hash] [ identifier[doc] [ literal[string] ]]
identifier[to_index] . identifier[append] ( identifier[doc] )
identifier[retry_until_ok] ( identifier[collect_existing_docs] )
identifier[LOG] . identifier[debug] ( literal[string] literal[string] )
identifier[remov_inc] = literal[int]
keyword[for] identifier[document_id] keyword[in] identifier[doc_hash] :
keyword[try] :
identifier[dm] . identifier[remove] (
identifier[document_id] ,
identifier[namespace] ,
identifier[util] . identifier[bson_ts_to_long] ( identifier[rollback_cutoff_ts] ),
)
identifier[remov_inc] += literal[int]
identifier[LOG] . identifier[debug] ( literal[string] % identifier[doc] )
keyword[except] identifier[errors] . identifier[OperationFailed] :
identifier[LOG] . identifier[warning] (
literal[string]
literal[string]
literal[string]
literal[string] % identifier[doc]
)
identifier[LOG] . identifier[debug] ( literal[string] % identifier[remov_inc] )
identifier[LOG] . identifier[debug] ( literal[string] literal[string] )
identifier[insert_inc] = literal[int]
identifier[fail_insert_inc] = literal[int]
keyword[for] identifier[doc] keyword[in] identifier[to_index] :
keyword[try] :
identifier[insert_inc] += literal[int]
identifier[dm] . identifier[upsert] (
identifier[doc] , identifier[namespace] , identifier[util] . identifier[bson_ts_to_long] ( identifier[rollback_cutoff_ts] )
)
keyword[except] identifier[errors] . identifier[OperationFailed] :
identifier[fail_insert_inc] += literal[int]
identifier[LOG] . identifier[exception] (
literal[string] literal[string] % identifier[doc]
)
identifier[LOG] . identifier[debug] (
literal[string]
literal[string]
literal[string]
%( identifier[insert_inc] , identifier[fail_insert_inc] , identifier[str] ( identifier[rollback_cutoff_ts] ))
)
keyword[return] identifier[rollback_cutoff_ts]
|
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
LOG.debug('OplogThread: Initiating rollback sequence to bring system into a consistent state.')
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc()) # depends on [control=['for'], data=['dm']]
# Of these documents, which is the most recent?
last_inserted_doc = max(last_docs, key=lambda x: x['_ts'] if x else float('-inf'))
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None # depends on [control=['if'], data=[]]
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
last_oplog_entry = util.retry_until_ok(self.oplog.find_one, {'ts': {'$lte': target_ts}, 'op': {'$ne': 'n'}}, sort=[('$natural', pymongo.DESCENDING)])
LOG.debug('OplogThread: last oplog entry is %s' % str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None # depends on [control=['if'], data=[]]
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry['ts']
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc['_ts']
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc['ns'] in rollback_set:
rollback_set[doc['ns']].append(doc) # depends on [control=['if'], data=['rollback_set']]
else:
rollback_set[doc['ns']] = [doc] # depends on [control=['for'], data=['doc']]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for (namespace, doc_list) in rollback_set.items():
# Get the original namespace
original_namespace = self.namespace_config.unmap_namespace(namespace)
if not original_namespace:
original_namespace = namespace # depends on [control=['if'], data=[]]
(database, coll) = original_namespace.split('.', 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc['_id']) for doc in doc_list]
# Use connection to whole cluster if in sharded environment.
client = self.mongos_client or self.primary_client
to_update = util.retry_until_ok(client[database][coll].find, {'_id': {'$in': bson_obj_id_list}}, projection=self.namespace_config.projection(original_namespace))
            # doc_list holds the docs in the target system; to_update
            # holds the corresponding docs in MongoDB.
doc_hash = {} # Hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc # depends on [control=['for'], data=['doc']]
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc['_id'] in doc_hash:
del doc_hash[doc['_id']]
to_index.append(doc) # depends on [control=['if'], data=['doc_hash']] # depends on [control=['for'], data=['doc']]
retry_until_ok(collect_existing_docs)
# Delete the inconsistent documents
LOG.debug('OplogThread: Rollback, removing inconsistent docs.')
remov_inc = 0
for document_id in doc_hash:
try:
dm.remove(document_id, namespace, util.bson_ts_to_long(rollback_cutoff_ts))
remov_inc += 1
                        LOG.debug('OplogThread: Rollback, removed %r ' % document_id) # depends on [control=['try'], data=[]]
except errors.OperationFailed:
                        LOG.warning('Could not delete document during rollback: %r This can happen if this document was already removed by another rollback happening at the same time.' % document_id) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['document_id']]
LOG.debug('OplogThread: Rollback, removed %d docs.' % remov_inc)
# Insert the ones from mongo
LOG.debug('OplogThread: Rollback, inserting documents from mongo.')
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
try:
                        dm.upsert(doc, namespace, util.bson_ts_to_long(rollback_cutoff_ts)) # depends on [control=['try'], data=[]]
                        insert_inc += 1
except errors.OperationFailed:
fail_insert_inc += 1
LOG.exception('OplogThread: Rollback, Unable to insert %r' % doc) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['doc']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['dm']]
LOG.debug('OplogThread: Rollback, Successfully inserted %d documents and failed to insert %d documents. Returning a rollback cutoff time of %s ' % (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))
return rollback_cutoff_ts
|
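The heart of rollback above is locating the rollback window before any document is touched. A condensed sketch of just that step, assuming the same doc_managers, oplog collection handle, and util timestamp helpers the method uses (passed in here to keep the sketch standalone):

import pymongo

def find_rollback_window(doc_managers, oplog, util):
    # Newest document already replicated to any target system.
    last_docs = []
    for dm in doc_managers:
        dm.commit()
        last_docs.append(dm.get_last_doc())
    newest = max(last_docs, key=lambda x: x["_ts"] if x else float("-inf"))
    if newest is None:
        return None  # nothing replicated, nothing to roll back
    # Newest non-noop oplog entry at or before that document's timestamp.
    target_ts = util.long_to_bson_ts(newest["_ts"])
    cutoff = oplog.find_one(
        {"ts": {"$lte": target_ts}, "op": {"$ne": "n"}},
        sort=[("$natural", pymongo.DESCENDING)],
    )
    if cutoff is None:
        return None  # oplog has rolled past us; handled by the caller
    # Documents stamped between these two bounds are potentially inconsistent.
    return util.bson_ts_to_long(cutoff["ts"]), newest["_ts"]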
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
|
def function[execute_get, parameter[self, resource]]:
constant[
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
]
variable[url] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f00f5b0>, <ast.Name object at 0x7da18f00d7b0>]]]
variable[headers] assign[=] call[name[kwargs].pop, parameter[constant[headers], call[name[dict], parameter[]]]]
call[name[headers]][constant[Accept]] assign[=] constant[application/json]
call[name[headers]][constant[Content-Type]] assign[=] constant[application/json]
if name[kwargs] begin[:]
variable[separator] assign[=] <ast.IfExp object at 0x7da18f00d5a0>
for taget[tuple[[<ast.Name object at 0x7da18f00dab0>, <ast.Name object at 0x7da18f00f1c0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18f00d480> begin[:]
variable[url] assign[=] binary_operation[constant[%s%s%s=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00ed70>, <ast.Name object at 0x7da18f00e560>, <ast.Name object at 0x7da18f00c7c0>, <ast.Call object at 0x7da18f00fe80>]]]
variable[separator] assign[=] constant[&]
if name[self]._access_token begin[:]
call[name[headers]][constant[Authorization]] assign[=] binary_operation[constant[Bearer %s] <ast.Mod object at 0x7da2590d6920> name[self]._access_token]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[binary_operation[name[response].status_code <ast.FloorDiv object at 0x7da2590d6bc0> constant[100]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da18f00d720>
return[call[name[response].json, parameter[]]]
|
keyword[def] identifier[execute_get] ( identifier[self] , identifier[resource] ,** identifier[kwargs] ):
literal[string]
identifier[url] = literal[string] %( identifier[self] . identifier[base_url] , identifier[resource] )
identifier[headers] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[dict] ())
identifier[headers] [ literal[string] ]= literal[string]
identifier[headers] [ literal[string] ]= literal[string]
keyword[if] identifier[kwargs] :
identifier[separator] = literal[string] keyword[if] literal[string] keyword[in] identifier[url] keyword[else] literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[hasattr] ( identifier[value] , literal[string] ) keyword[and] identifier[type] ( identifier[value] ) keyword[not] keyword[in] identifier[six] . identifier[string_types] :
identifier[url] = literal[string] %( identifier[url] , identifier[separator] , identifier[key] , literal[string] . identifier[join] ( identifier[value] ))
keyword[else] :
identifier[url] = literal[string] %( identifier[url] , identifier[separator] , identifier[key] , identifier[value] )
identifier[separator] = literal[string]
keyword[if] identifier[self] . identifier[_access_token] :
identifier[headers] [ literal[string] ]= literal[string] % identifier[self] . identifier[_access_token]
keyword[else] :
identifier[separator] = literal[string] keyword[if] literal[string] keyword[in] identifier[url] keyword[else] literal[string]
identifier[url] = literal[string] %(
identifier[url] , identifier[separator] , identifier[self] . identifier[_client_id] , identifier[self] . identifier[_client_secret]
)
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] )
keyword[if] identifier[response] . identifier[status_code] // literal[int] != literal[int] :
keyword[raise] identifier[GhostException] ( identifier[response] . identifier[status_code] , identifier[response] . identifier[json] (). identifier[get] ( literal[string] ,[]))
keyword[return] identifier[response] . identifier[json] ()
|
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for (key, value) in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value)) # depends on [control=['if'], data=[]]
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&' # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token # depends on [control=['if'], data=[]]
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (url, separator, self._client_id, self._client_secret)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', [])) # depends on [control=['if'], data=[]]
return response.json()
|
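The query-string assembly in execute_get can be isolated into a standalone, runnable sketch (the six-based string check is replaced by isinstance for brevity; everything else mirrors the loop above):

def build_url(base_url, resource, **kwargs):
    # Mirrors execute_get's query-string logic, without the HTTP call.
    url = '%s/%s' % (base_url, resource)
    separator = '&' if '?' in url else '?'
    for key, value in kwargs.items():
        if hasattr(value, '__iter__') and not isinstance(value, str):
            url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
        else:
            url = '%s%s%s=%s' % (url, separator, key, value)
        separator = '&'
    return url

print(build_url('https://blog.example/ghost/api/v0.1', 'posts',
                limit=5, include=('tags', 'authors')))
# https://blog.example/ghost/api/v0.1/posts?limit=5&include=tags,authors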
def metastability(alpha, T, right_eigenvectors, square_map, pi):
"""Return the metastability PCCA+ objective function.
Parameters
----------
alpha : ndarray
Parameters of objective function (e.g. flattened A)
T : csr sparse matrix
Transition matrix
right_eigenvectors : ndarray
The right eigenvectors.
square_map : ndarray
Mapping from square indices (i,j) to flat indices (k).
pi : ndarray
Equilibrium Populations of transition matrix.
Returns
-------
obj : float
The objective function
Notes
-------
metastability: try to make metastable fuzzy state decomposition.
Defined in ref. [2].
"""
num_micro, num_eigen = right_eigenvectors.shape
A, chi, mapping = calculate_fuzzy_chi(alpha, square_map,
right_eigenvectors)
# If current point is infeasible or leads to degenerate lumping.
if (len(np.unique(mapping)) != right_eigenvectors.shape[1] or
has_constraint_violation(A, right_eigenvectors)):
return -1.0 * np.inf
obj = 0.0
    # Calculate metastability of the lumped model. Eqn 4.20 in LAA.
for i in range(num_eigen):
obj += np.dot(T.dot(chi[:, i]), pi * chi[:, i]) / np.dot(chi[:, i], pi)
return obj
|
def function[metastability, parameter[alpha, T, right_eigenvectors, square_map, pi]]:
constant[Return the metastability PCCA+ objective function.
Parameters
----------
alpha : ndarray
Parameters of objective function (e.g. flattened A)
T : csr sparse matrix
Transition matrix
right_eigenvectors : ndarray
The right eigenvectors.
square_map : ndarray
Mapping from square indices (i,j) to flat indices (k).
pi : ndarray
Equilibrium Populations of transition matrix.
Returns
-------
obj : float
The objective function
Notes
-------
metastability: try to make metastable fuzzy state decomposition.
Defined in ref. [2].
]
<ast.Tuple object at 0x7da1b068ebc0> assign[=] name[right_eigenvectors].shape
<ast.Tuple object at 0x7da1b068e2f0> assign[=] call[name[calculate_fuzzy_chi], parameter[name[alpha], name[square_map], name[right_eigenvectors]]]
if <ast.BoolOp object at 0x7da1b068e920> begin[:]
return[binary_operation[<ast.UnaryOp object at 0x7da1b063d600> * name[np].inf]]
variable[obj] assign[=] constant[0.0]
for taget[name[i]] in starred[call[name[range], parameter[name[num_eigen]]]] begin[:]
<ast.AugAssign object at 0x7da1b063dae0>
return[name[obj]]
|
keyword[def] identifier[metastability] ( identifier[alpha] , identifier[T] , identifier[right_eigenvectors] , identifier[square_map] , identifier[pi] ):
literal[string]
identifier[num_micro] , identifier[num_eigen] = identifier[right_eigenvectors] . identifier[shape]
identifier[A] , identifier[chi] , identifier[mapping] = identifier[calculate_fuzzy_chi] ( identifier[alpha] , identifier[square_map] ,
identifier[right_eigenvectors] )
keyword[if] ( identifier[len] ( identifier[np] . identifier[unique] ( identifier[mapping] ))!= identifier[right_eigenvectors] . identifier[shape] [ literal[int] ] keyword[or]
identifier[has_constraint_violation] ( identifier[A] , identifier[right_eigenvectors] )):
keyword[return] - literal[int] * identifier[np] . identifier[inf]
identifier[obj] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_eigen] ):
identifier[obj] += identifier[np] . identifier[dot] ( identifier[T] . identifier[dot] ( identifier[chi] [:, identifier[i] ]), identifier[pi] * identifier[chi] [:, identifier[i] ])/ identifier[np] . identifier[dot] ( identifier[chi] [:, identifier[i] ], identifier[pi] )
keyword[return] identifier[obj]
|
def metastability(alpha, T, right_eigenvectors, square_map, pi):
"""Return the metastability PCCA+ objective function.
Parameters
----------
alpha : ndarray
Parameters of objective function (e.g. flattened A)
T : csr sparse matrix
Transition matrix
right_eigenvectors : ndarray
The right eigenvectors.
square_map : ndarray
Mapping from square indices (i,j) to flat indices (k).
pi : ndarray
Equilibrium Populations of transition matrix.
Returns
-------
obj : float
The objective function
Notes
-------
metastability: try to make metastable fuzzy state decomposition.
Defined in ref. [2].
"""
(num_micro, num_eigen) = right_eigenvectors.shape
(A, chi, mapping) = calculate_fuzzy_chi(alpha, square_map, right_eigenvectors)
# If current point is infeasible or leads to degenerate lumping.
if len(np.unique(mapping)) != right_eigenvectors.shape[1] or has_constraint_violation(A, right_eigenvectors):
return -1.0 * np.inf # depends on [control=['if'], data=[]]
obj = 0.0
    # Calculate metastability of the lumped model. Eqn 4.20 in LAA.
for i in range(num_eigen):
obj += np.dot(T.dot(chi[:, i]), pi * chi[:, i]) / np.dot(chi[:, i], pi) # depends on [control=['for'], data=['i']]
return obj
|
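In formula form, the accumulation line obj += np.dot(T.dot(chi[:, i]), pi * chi[:, i]) / np.dot(chi[:, i], pi) evaluates the objective referenced as Eqn 4.20. Writing the pi-weighted inner product as in the comment below:

% pi-weighted inner product: \langle u, v \rangle_\pi = \sum_k \pi_k u_k v_k
\mathrm{obj}
  = \sum_{i=1}^{n_{\mathrm{eigen}}}
    \frac{\langle \chi_i,\; T \chi_i \rangle_{\pi}}
         {\langle \chi_i,\; \mathbf{1} \rangle_{\pi}}

Each term measures how much probability mass the fuzzy state chi_i retains within itself over one step, normalized by that state's equilibrium population.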
def get_capabilities_by_type(self, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Dict[str, Parser]]]:
"""
For all types that are supported,
lists all extensions that can be parsed into such a type.
For each extension, provides the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return:
"""
check_var(strict_type_matching, var_types=bool, var_name='strict_matching')
res = dict()
# List all types that can be parsed
for typ in self.get_all_supported_types():
res[typ] = self.get_capabilities_for_type(typ, strict_type_matching)
return res
|
def function[get_capabilities_by_type, parameter[self, strict_type_matching]]:
constant[
For all types that are supported,
lists all extensions that can be parsed into such a type.
For each extension, provides the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return:
]
call[name[check_var], parameter[name[strict_type_matching]]]
variable[res] assign[=] call[name[dict], parameter[]]
for taget[name[typ]] in starred[call[name[self].get_all_supported_types, parameter[]]] begin[:]
call[name[res]][name[typ]] assign[=] call[name[self].get_capabilities_for_type, parameter[name[typ], name[strict_type_matching]]]
return[name[res]]
|
keyword[def] identifier[get_capabilities_by_type] ( identifier[self] , identifier[strict_type_matching] : identifier[bool] = keyword[False] )-> identifier[Dict] [ identifier[Type] , identifier[Dict] [ identifier[str] , identifier[Dict] [ identifier[str] , identifier[Parser] ]]]:
literal[string]
identifier[check_var] ( identifier[strict_type_matching] , identifier[var_types] = identifier[bool] , identifier[var_name] = literal[string] )
identifier[res] = identifier[dict] ()
keyword[for] identifier[typ] keyword[in] identifier[self] . identifier[get_all_supported_types] ():
identifier[res] [ identifier[typ] ]= identifier[self] . identifier[get_capabilities_for_type] ( identifier[typ] , identifier[strict_type_matching] )
keyword[return] identifier[res]
|
def get_capabilities_by_type(self, strict_type_matching: bool=False) -> Dict[Type, Dict[str, Dict[str, Parser]]]:
"""
For all types that are supported,
lists all extensions that can be parsed into such a type.
For each extension, provides the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return:
"""
check_var(strict_type_matching, var_types=bool, var_name='strict_matching')
res = dict()
# List all types that can be parsed
for typ in self.get_all_supported_types():
res[typ] = self.get_capabilities_for_type(typ, strict_type_matching) # depends on [control=['for'], data=['typ']]
return res
|
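The shape of the mapping returned by get_capabilities_by_type, sketched with hypothetical names (root_parser and the printed entries are illustrative, not taken from the library):

caps = root_parser.get_capabilities_by_type(strict_type_matching=False)
# caps[<type>][<file extension>] -> {parser name: parser}, most pertinent first
for typ, by_ext in caps.items():
    for ext, parsers in by_ext.items():
        print(typ.__name__, ext, list(parsers))
# e.g.  DataFrame  .csv  ['read_df_from_csv', ...]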
def timeseries(self):
"""Simulated time series"""
if self._timeseries is None:
self.compute()
if isinstance(self.system, NetworkModel):
return self.system._reshape_timeseries(self._timeseries)
else:
return self._timeseries
|
def function[timeseries, parameter[self]]:
constant[Simulated time series]
if compare[name[self]._timeseries is constant[None]] begin[:]
call[name[self].compute, parameter[]]
if call[name[isinstance], parameter[name[self].system, name[NetworkModel]]] begin[:]
return[call[name[self].system._reshape_timeseries, parameter[name[self]._timeseries]]]
|
keyword[def] identifier[timeseries] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_timeseries] keyword[is] keyword[None] :
identifier[self] . identifier[compute] ()
keyword[if] identifier[isinstance] ( identifier[self] . identifier[system] , identifier[NetworkModel] ):
keyword[return] identifier[self] . identifier[system] . identifier[_reshape_timeseries] ( identifier[self] . identifier[_timeseries] )
keyword[else] :
keyword[return] identifier[self] . identifier[_timeseries]
|
def timeseries(self):
"""Simulated time series"""
if self._timeseries is None:
self.compute() # depends on [control=['if'], data=[]]
if isinstance(self.system, NetworkModel):
return self.system._reshape_timeseries(self._timeseries) # depends on [control=['if'], data=[]]
else:
return self._timeseries
|
def _handle_page_args(self, rison_args):
"""
Helper function to handle rison page
        arguments, sets defaults and imposes
FAB_API_MAX_PAGE_SIZE
:param rison_args:
:return: (tuple) page, page_size
"""
page = rison_args.get(API_PAGE_INDEX_RIS_KEY, 0)
page_size = rison_args.get(API_PAGE_SIZE_RIS_KEY, self.page_size)
return self._sanitize_page_args(page, page_size)
|
def function[_handle_page_args, parameter[self, rison_args]]:
constant[
Helper function to handle rison page
arguments, sets defaults and impose
FAB_API_MAX_PAGE_SIZE
:param rison_args:
:return: (tuple) page, page_size
]
variable[page] assign[=] call[name[rison_args].get, parameter[name[API_PAGE_INDEX_RIS_KEY], constant[0]]]
variable[page_size] assign[=] call[name[rison_args].get, parameter[name[API_PAGE_SIZE_RIS_KEY], name[self].page_size]]
return[call[name[self]._sanitize_page_args, parameter[name[page], name[page_size]]]]
|
keyword[def] identifier[_handle_page_args] ( identifier[self] , identifier[rison_args] ):
literal[string]
identifier[page] = identifier[rison_args] . identifier[get] ( identifier[API_PAGE_INDEX_RIS_KEY] , literal[int] )
identifier[page_size] = identifier[rison_args] . identifier[get] ( identifier[API_PAGE_SIZE_RIS_KEY] , identifier[self] . identifier[page_size] )
keyword[return] identifier[self] . identifier[_sanitize_page_args] ( identifier[page] , identifier[page_size] )
|
def _handle_page_args(self, rison_args):
"""
Helper function to handle rison page
        arguments, sets defaults and imposes
FAB_API_MAX_PAGE_SIZE
:param rison_args:
:return: (tuple) page, page_size
"""
page = rison_args.get(API_PAGE_INDEX_RIS_KEY, 0)
page_size = rison_args.get(API_PAGE_SIZE_RIS_KEY, self.page_size)
return self._sanitize_page_args(page, page_size)
|
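The lookup-with-default behavior of _handle_page_args, isolated below; the two RIS key constants are given assumed values here ('page' and 'page_size'), and only the .get() defaults come from the method:

API_PAGE_INDEX_RIS_KEY = 'page'       # assumed value
API_PAGE_SIZE_RIS_KEY = 'page_size'   # assumed value

rison_args = {'page_size': 50}
page = rison_args.get(API_PAGE_INDEX_RIS_KEY, 0)       # -> 0 (default)
page_size = rison_args.get(API_PAGE_SIZE_RIS_KEY, 25)  # -> 50 (from args)
# _sanitize_page_args would then clamp these against FAB_API_MAX_PAGE_SIZE.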
def update_model(self, idx=None):
"""Updates the value of property at given index. If idx is
None, all controlled indices will be updated. This method
should be called directly by the user in very unusual
conditions."""
    if idx is None:
        for w in self._widgets:
            idx = self._get_idx_from_widget(w)
            try:
                val = self._read_widget(idx)
            except ValueError:
                pass
            else:
                self._write_property(val, idx)
    else:
        try:
            val = self._read_widget(idx)
        except ValueError:
            pass
        else:
            self._write_property(val, idx)
    return
|
def function[update_model, parameter[self, idx]]:
constant[Updates the value of property at given index. If idx is
None, all controlled indices will be updated. This method
should be called directly by the user in very unusual
conditions.]
if compare[name[idx] is constant[None]] begin[:]
for taget[name[w]] in starred[name[self]._widgets] begin[:]
variable[idx] assign[=] call[name[self]._get_idx_from_widget, parameter[name[w]]]
<ast.Try object at 0x7da1b16d6710>
pass
pass
return[None]
|
keyword[def] identifier[update_model] ( identifier[self] , identifier[idx] = keyword[None] ):
literal[string]
keyword[if] identifier[idx] keyword[is] keyword[None] :
keyword[for] identifier[w] keyword[in] identifier[self] . identifier[_widgets] :
identifier[idx] = identifier[self] . identifier[_get_idx_from_widget] ( identifier[w] )
keyword[try] : identifier[val] = identifier[self] . identifier[_read_widget] ( identifier[idx] )
keyword[except] identifier[ValueError] : keyword[pass]
keyword[else] : identifier[self] . identifier[_write_property] ( identifier[val] , identifier[idx] )
keyword[pass]
keyword[pass]
keyword[else] :
keyword[try] : identifier[val] = identifier[self] . identifier[_read_widget] ( identifier[idx] )
keyword[except] identifier[ValueError] : keyword[pass]
keyword[else] : identifier[self] . identifier[_write_property] ( identifier[val] , identifier[idx] )
keyword[return]
|
def update_model(self, idx=None):
"""Updates the value of property at given index. If idx is
None, all controlled indices will be updated. This method
should be called directly by the user in very unusual
conditions."""
if idx is None:
for w in self._widgets:
idx = self._get_idx_from_widget(w)
try:
val = self._read_widget(idx) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
else:
self._write_property(val, idx)
pass # depends on [control=['for'], data=['w']]
pass # depends on [control=['if'], data=['idx']]
else:
try:
val = self._read_widget(idx) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
else:
self._write_property(val, idx)
return
|