code | docstring | text
|---|---|---|
def GetAllPluginInformation(cls, show_all=True):
"""Retrieves a list of the registered analysis plugins.
Args:
show_all (Optional[bool]): True if all analysis plugin names should
be listed.
Returns:
list[tuple[str, str, str]]: the name, docstring and type string of each
analysis plugin in alphabetical order.
"""
results = []
for plugin_class in iter(cls._plugin_classes.values()):
plugin_object = plugin_class()
if not show_all and not plugin_class.ENABLE_IN_EXTRACTION:
continue
# TODO: Use a specific description variable, not the docstring.
doc_string, _, _ = plugin_class.__doc__.partition('\n')
type_string = cls._PLUGIN_TYPE_STRINGS.get(plugin_object.plugin_type)
information_tuple = (plugin_object.plugin_name, doc_string, type_string)
results.append(information_tuple)
return sorted(results) | Retrieves a list of the registered analysis plugins.
Args:
show_all (Optional[bool]): True if all analysis plugin names should
be listed.
Returns:
list[tuple[str, str, str]]: the name, docstring and type string of each
analysis plugin in alphabetical order. | Below is the instruction that describes the task:
### Input:
Retrieves a list of the registered analysis plugins.
Args:
show_all (Optional[bool]): True if all analysis plugin names should
be listed.
Returns:
list[tuple[str, str, str]]: the name, docstring and type string of each
analysis plugin in alphabetical order.
### Response:
def GetAllPluginInformation(cls, show_all=True):
"""Retrieves a list of the registered analysis plugins.
Args:
show_all (Optional[bool]): True if all analysis plugin names should
be listed.
Returns:
list[tuple[str, str, str]]: the name, docstring and type string of each
analysis plugin in alphabetical order.
"""
results = []
for plugin_class in iter(cls._plugin_classes.values()):
plugin_object = plugin_class()
if not show_all and not plugin_class.ENABLE_IN_EXTRACTION:
continue
# TODO: Use a specific description variable, not the docstring.
doc_string, _, _ = plugin_class.__doc__.partition('\n')
type_string = cls._PLUGIN_TYPE_STRINGS.get(plugin_object.plugin_type)
information_tuple = (plugin_object.plugin_name, doc_string, type_string)
results.append(information_tuple)
return sorted(results) |
def send(self, response):
""" Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent.
"""
self._connection.connection.set('{}:{}'.format(SIGNAL_REDIS_PREFIX, response.uid),
pickle.dumps(response)) | Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent. | Below is the instruction that describes the task:
### Input:
Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent.
### Response:
def send(self, response):
""" Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent.
"""
self._connection.connection.set('{}:{}'.format(SIGNAL_REDIS_PREFIX, response.uid),
pickle.dumps(response)) |
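For context, a minimal sketch of the receiving side of this exchange, assuming a plain redis-py connection and the same SIGNAL_REDIS_PREFIX key scheme (both names come from the code above; everything else is illustrative):
import pickle
import redis

SIGNAL_REDIS_PREFIX = 'signal'  # assumed value; only the key scheme matters

def receive(connection: redis.Redis, uid: str):
    # Fetch and unpickle the response stored under the request's uid, if any.
    raw = connection.get('{}:{}'.format(SIGNAL_REDIS_PREFIX, uid))
    return pickle.loads(raw) if raw is not None else None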
def generate(self):
"""Generate the next neural architecture.
Returns
-------
other_info: any object
Anything to be saved in the training queue together with the architecture.
generated_graph: Graph
An instance of Graph.
"""
generated_graph, new_father_id = self.bo.generate(self.descriptors)
if new_father_id is None:
new_father_id = 0
generated_graph = self.generators[0](
self.n_classes, self.input_shape
).generate(self.default_model_len, self.default_model_width)
return new_father_id, generated_graph | Generate the next neural architecture.
Returns
-------
other_info: any object
Anything to be saved in the training queue together with the architecture.
generated_graph: Graph
An instance of Graph. | Below is the instruction that describes the task:
### Input:
Generate the next neural architecture.
Returns
-------
other_info: any object
Anything to be saved in the training queue together with the architecture.
generated_graph: Graph
An instance of Graph.
### Response:
def generate(self):
"""Generate the next neural architecture.
Returns
-------
other_info: any object
Anything to be saved in the training queue together with the architecture.
generated_graph: Graph
An instance of Graph.
"""
generated_graph, new_father_id = self.bo.generate(self.descriptors)
if new_father_id is None:
new_father_id = 0
generated_graph = self.generators[0](
self.n_classes, self.input_shape
).generate(self.default_model_len, self.default_model_width)
return new_father_id, generated_graph |
def combine_tensors_and_multiply(combination: str,
tensors: List[torch.Tensor],
weights: torch.nn.Parameter) -> torch.Tensor:
"""
Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
pieces = combination.split(',')
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far:(dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result | Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`. | Below is the instruction that describes the task:
### Input:
Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`.
### Response:
def combine_tensors_and_multiply(combination: str,
tensors: List[torch.Tensor],
weights: torch.nn.Parameter) -> torch.Tensor:
"""
Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
pieces = combination.split(',')
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far:(dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result |
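A minimal sketch in plain PyTorch of the identity this function exploits (no AllenNLP helpers are used): for a combination such as 'x,y,x*y', multiplying the concatenated combination by the weight vector equals the sum of per-piece weighted multiplies, so the large concatenated tensor never has to be materialized.
import torch

x = torch.randn(2, 3)   # hypothetical batch of 2, embedding dim 3
y = torch.randn(2, 3)
w = torch.randn(9)      # combined_dim for 'x,y,x*y' is 3 + 3 + 3

# Naive route: materialize the full combination, then multiply by the weights.
naive = torch.cat([x, y, x * y], dim=-1).matmul(w)

# Piecewise route: multiply each piece by its slice of the weights and sum.
piecewise = x.matmul(w[0:3]) + y.matmul(w[3:6]) + (x * y).matmul(w[6:9])

assert torch.allclose(naive, piecewise)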
def get_graphs_by_ids(self, network_ids: Iterable[int]) -> List[BELGraph]:
"""Get several graphs by their identifiers."""
return [
self.networks[network_id]
for network_id in network_ids
] | Get several graphs by their identifiers. | Below is the instruction that describes the task:
### Input:
Get several graphs by their identifiers.
### Response:
def get_graphs_by_ids(self, network_ids: Iterable[int]) -> List[BELGraph]:
"""Get several graphs by their identifiers."""
return [
self.networks[network_id]
for network_id in network_ids
] |
def _parse_users(self, match):
'''Parse usernames.'''
# Don't parse lists here
if match.group(2) is not None:
return match.group(0)
mat = match.group(0)
if self._include_spans:
self._users.append((mat[1:], match.span(0)))
else:
self._users.append(mat[1:])
if self._html:
return self.format_username(mat[0:1], mat[1:]) | Parse usernames. | Below is the instruction that describes the task:
### Input:
Parse usernames.
### Response:
def _parse_users(self, match):
'''Parse usernames.'''
# Don't parse lists here
if match.group(2) is not None:
return match.group(0)
mat = match.group(0)
if self._include_spans:
self._users.append((mat[1:], match.span(0)))
else:
self._users.append(mat[1:])
if self._html:
return self.format_username(mat[0:1], mat[1:]) |
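A sketch of how a handler like this is typically driven; the pattern below is hypothetical (the real one lives elsewhere in the parser), but it shows the role of group(2): a non-None second group marks list syntax such as @user/list, which this method deliberately leaves alone.
import re

# Hypothetical pattern: group 1 is the username, group 2 the optional list part.
USER_RE = re.compile(r'@(\w+)(/\w+)?')

for match in USER_RE.finditer('thanks @alice and @bob/friends'):
    print(match.group(0), match.group(2))
# @alice None            -> parsed as a username
# @bob/friends /friends  -> skipped here, left for the list parser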
def basic_retinotopy_data(hemi, retino_type):
'''
basic_retinotopy_data(hemi, t) yields a numpy array of data for the given cortex object hemi
and retinotopy type t; it does this by looking at the properties in hemi and picking out any
combination that is commonly used to denote empirical retinotopy data. These common names are
stored in _predicted_retinotopy_names, in order of preference, which may be modified.
The argument t should be one of 'polar_angle', 'eccentricity', 'visual_area', or 'weight'.
Unlike the related functions empirical_retinotopy_data and predicted_retinotopy_data, this
function calls both of these (predicted first then empirical) in the case that it does not
find a valid property.
'''
dat = _retinotopy_names[retino_type.lower()]
val = next((hemi.prop(s) for s in six.iterkeys(hemi.properties) if s.lower() in dat), None)
if val is None and retino_type.lower() != 'weight':
val = predicted_retinotopy_data(hemi, retino_type)
if val is None and retino_type.lower() != 'visual_area':
val = empirical_retinotopy_data(hemi, retino_type)
return val | basic_retinotopy_data(hemi, t) yields a numpy array of data for the given cortex object hemi
and retinotopy type t; it does this by looking at the properties in hemi and picking out any
combination that is commonly used to denote empirical retinotopy data. These common names are
stored in _predicted_retinotopy_names, in order of preference, which may be modified.
The argument t should be one of 'polar_angle', 'eccentricity', 'visual_area', or 'weight'.
Unlike the related functions empirical_retinotopy_data and predicted_retinotopy_data, this
function calls both of these (predicted first then empirical) in the case that it does not
find a valid property. | Below is the instruction that describes the task:
### Input:
basic_retinotopy_data(hemi, t) yields a numpy array of data for the given cortex object hemi
and retinotopy type t; it does this by looking at the properties in hemi and picking out any
combination that is commonly used to denote empirical retinotopy data. These common names are
stored in _predicted_retinotopy_names, in order of preference, which may be modified.
The argument t should be one of 'polar_angle', 'eccentricity', 'visual_area', or 'weight'.
Unlike the related functions empirical_retinotopy_data and predicted_retinotopy_data, this
function calls both of these (predicted first then empirical) in the case that it does not
find a valid property.
### Response:
def basic_retinotopy_data(hemi, retino_type):
'''
basic_retinotopy_data(hemi, t) yields a numpy array of data for the given cortex object hemi
and retinotopy type t; it does this by looking at the properties in hemi and picking out any
combination that is commonly used to denote empirical retinotopy data. These common names are
stored in _predicted_retinotopy_names, in order of preference, which may be modified.
The argument t should be one of 'polar_angle', 'eccentricity', 'visual_area', or 'weight'.
Unlike the related functions empirical_retinotopy_data and predicted_retinotopy_data, this
function calls both of these (predicted first then empirical) in the case that it does not
find a valid property.
'''
dat = _retinotopy_names[retino_type.lower()]
val = next((hemi.prop(s) for s in six.iterkeys(hemi.properties) if s.lower() in dat), None)
if val is None and retino_type.lower() != 'weight':
val = predicted_retinotopy_data(hemi, retino_type)
if val is None and retino_type.lower() != 'visual_area':
val = empirical_retinotopy_data(hemi, retino_type)
return val |
def _get_conditions(pk_conds, and_conds=None):
"""If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the MySQL condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (bk1 and ... bkm))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
"""
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2) | If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the MySQL condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (bk1 and ... bkm))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query | Below is the instruction that describes the task:
### Input:
If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the MySQL condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (bk1 and ... bkm))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
### Response:
def _get_conditions(pk_conds, and_conds=None):
"""If and_conds = [a1, a2, ..., an] and pk_conds = [[b11, b12, ..., b1m], ... [bk1, ..., bkm]],
this function will return the MySQL condition clause:
a1 & a2 & ... an & ((b11 and ... b1m) or ... (bk1 and ... bkm))
:param pk_conds: a list of list of primary key constraints returned by _get_conditions_list
:param and_conds: additional and conditions to be placed on the query
"""
if and_conds is None:
and_conds = []
if len(and_conds) == 0 and len(pk_conds) == 0:
return sa.and_()
condition1 = sa.and_(*and_conds)
condition2 = sa.or_(*[sa.and_(*cond) for cond in pk_conds])
return sa.and_(condition1, condition2) |
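A usage sketch with SQLAlchemy Core against a hypothetical two-column table, just to show the AND/OR nesting the helper produces:
import sqlalchemy as sa

metadata = sa.MetaData()
t = sa.Table('t', metadata,
             sa.Column('a', sa.Integer),
             sa.Column('b', sa.Integer))

clause = _get_conditions(
    pk_conds=[[t.c.a == 1, t.c.b == 2], [t.c.a == 3, t.c.b == 4]],
    and_conds=[t.c.a > 0])
# Compiles to roughly: a > 0 AND ((a = 1 AND b = 2) OR (a = 3 AND b = 4))
print(clause.compile(compile_kwargs={'literal_binds': True}))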
def txn2data(self, txn: dict) -> str:
"""
Given ledger transaction, return its data json.
:param txn: transaction as dict
:return: transaction data json
"""
rv_json = json.dumps({})
if self == Protocol.V_13:
rv_json = json.dumps(txn['result'].get('data', {}))
else:
rv_json = json.dumps((txn['result'].get('data', {}) or {}).get('txn', {})) # "data": null for no such txn
return rv_json | Given ledger transaction, return its data json.
:param txn: transaction as dict
:return: transaction data json | Below is the instruction that describes the task:
### Input:
Given ledger transaction, return its data json.
:param txn: transaction as dict
:return: transaction data json
### Response:
def txn2data(self, txn: dict) -> str:
"""
Given ledger transaction, return its data json.
:param txn: transaction as dict
:return: transaction data json
"""
rv_json = json.dumps({})
if self == Protocol.V_13:
rv_json = json.dumps(txn['result'].get('data', {}))
else:
rv_json = json.dumps((txn['result'].get('data', {}) or {}).get('txn', {})) # "data": null for no such txn
return rv_json |
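To make the two branches concrete, simplified (hypothetical) transaction shapes for each protocol family; only the nesting of the 'data' payload differs:
# Protocol V_13: the payload sits directly under result['data'].
txn_v13 = {'result': {'data': {'alias': 'node1'}}}

# Later protocols: the payload is one level deeper, under result['data']['txn'],
# and result['data'] is null when no such transaction exists -- hence the
# `or {}` guard before the .get('txn', {}) lookup.
txn_later = {'result': {'data': {'txn': {'alias': 'node1'}}}}
txn_missing = {'result': {'data': None}}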
def read_excel_file(inputfile, sheet_name):
""" Return a matrix containing all the information present in the
excel sheet of the specified excel document.
:arg inputfile: excel document to read
:arg sheet_name: the name of the excel sheet to return
"""
workbook = xlrd.open_workbook(inputfile)
output = []
found = False
for sheet in workbook.sheets():
if sheet.name == sheet_name:
found = True
for row in range(sheet.nrows):
values = []
for col in range(sheet.ncols):
values.append(sheet.cell(row, col).value)
output.append(values)
if not found: # pragma: no cover
raise MQ2Exception('Invalid session identifier provided')
return output | Return a matrix containing all the information present in the
excel sheet of the specified excel document.
:arg inputfile: excel document to read
:arg sheet_name: the name of the excel sheet to return | Below is the instruction that describes the task:
### Input:
Return a matrix containing all the information present in the
excel sheet of the specified excel document.
:arg inputfile: excel document to read
:arg sheet_name: the name of the excel sheet to return
### Response:
def read_excel_file(inputfile, sheet_name):
""" Return a matrix containing all the information present in the
excel sheet of the specified excel document.
:arg inputfile: excel document to read
:arg sheet_name: the name of the excel sheet to return
"""
workbook = xlrd.open_workbook(inputfile)
output = []
found = False
for sheet in workbook.sheets():
if sheet.name == sheet_name:
found = True
for row in range(sheet.nrows):
values = []
for col in range(sheet.ncols):
values.append(sheet.cell(row, col).value)
output.append(values)
if not found: # pragma: no cover
raise MQ2Exception('Invalid session identifier provided')
return output |
def _validate_positional_arguments(args):
"""
To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055.
Assuming that unknown commands are positional arguments immediately
preceded by words that only appear at the end of the commands
Slight modification of
https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373
Args:
args: The arguments that the user inputs in the terminal.
Returns:
Rudimentary parsed arguments.
"""
nouns = []
for arg in args:
if not arg.startswith('-') and not arg.startswith('{{'):  # collect nouns until the first flag or '{{' placeholder
nouns.append(arg)
else:
break
while nouns:
search = ' '.join(nouns)
# Since the command name may be immediately followed by a positional arg, strip those off
if not next((x for x in azext_alias.cached_reserved_commands if x.endswith(search)), False):
del nouns[-1]
else:
return
raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(' '.join(args))) | To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055.
Assuming that unknown commands are positional arguments immediately
preceded by words that only appear at the end of the commands
Slight modification of
https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373
Args:
args: The arguments that the user inputs in the terminal.
Returns:
Rudimentary parsed arguments. | Below is the instruction that describes the task:
### Input:
To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055.
Assuming that unknown commands are positional arguments immediately
preceded by words that only appear at the end of the commands
Slight modification of
https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373
Args:
args: The arguments that the user inputs in the terminal.
Returns:
Rudimentary parsed arguments.
### Response:
def _validate_positional_arguments(args):
"""
To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055.
Assuming that unknown commands are positional arguments immediately
preceded by words that only appear at the end of the commands
Slight modification of
https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373
Args:
args: The arguments that the user inputs in the terminal.
Returns:
Rudimentary parsed arguments.
"""
nouns = []
for arg in args:
if not arg.startswith('-') and not arg.startswith('{{'):  # collect nouns until the first flag or '{{' placeholder
nouns.append(arg)
else:
break
while nouns:
search = ' '.join(nouns)
# Since the command name may be immediately followed by a positional arg, strip those off
if not next((x for x in azext_alias.cached_reserved_commands if x.endswith(search)), False):
del nouns[-1]
else:
return
raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(' '.join(args))) |
def create_entry_tag(sender, instance, created, **kwargs):
"""
Creates EntryTag for Entry corresponding to specified
ItemBase instance.
:param sender: the sending ItemBase class.
:param instance: the ItemBase instance.
"""
from ..models import (
Entry,
EntryTag
)
entry = Entry.objects.get_for_model(instance.content_object)[0]
tag = instance.tag
if not EntryTag.objects.filter(tag=tag, entry=entry).exists():
EntryTag.objects.create(tag=tag, entry=entry) | Creates EntryTag for Entry corresponding to specified
ItemBase instance.
:param sender: the sending ItemBase class.
:param instance: the ItemBase instance. | Below is the instruction that describes the task:
### Input:
Creates EntryTag for Entry corresponding to specified
ItemBase instance.
:param sender: the sending ItemBase class.
:param instance: the ItemBase instance.
### Response:
def create_entry_tag(sender, instance, created, **kwargs):
"""
Creates EntryTag for Entry corresponding to specified
ItemBase instance.
:param sender: the sending ItemBase class.
:param instance: the ItemBase instance.
"""
from ..models import (
Entry,
EntryTag
)
entry = Entry.objects.get_for_model(instance.content_object)[0]
tag = instance.tag
if not EntryTag.objects.filter(tag=tag, entry=entry).exists():
EntryTag.objects.create(tag=tag, entry=entry) |
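A minimal sketch of wiring this receiver to Django's post_save signal; TaggedItem is a hypothetical stand-in for whatever concrete ItemBase subclass the app defines:
from django.db.models.signals import post_save

post_save.connect(create_entry_tag, sender=TaggedItem)  # TaggedItem: placeholder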
def format_result(result):
"""Serialise Result"""
instance = None
error = None
if result["instance"] is not None:
instance = format_instance(result["instance"])
if result["error"] is not None:
error = format_error(result["error"])
result = {
"success": result["success"],
"plugin": format_plugin(result["plugin"]),
"instance": instance,
"error": error,
"records": format_records(result["records"]),
"duration": result["duration"]
}
if os.getenv("PYBLISH_SAFE"):
schema.validate(result, "result")
return result | Serialise Result | Below is the instruction that describes the task:
### Input:
Serialise Result
### Response:
def format_result(result):
"""Serialise Result"""
instance = None
error = None
if result["instance"] is not None:
instance = format_instance(result["instance"])
if result["error"] is not None:
error = format_error(result["error"])
result = {
"success": result["success"],
"plugin": format_plugin(result["plugin"]),
"instance": instance,
"error": error,
"records": format_records(result["records"]),
"duration": result["duration"]
}
if os.getenv("PYBLISH_SAFE"):
schema.validate(result, "result")
return result |
def csv(self):
"""Parse raw response as csv and return row object list.
"""
lines = self._parsecsv(self.raw)
# set keys from header line (first line)
keys = next(lines)
for line in lines:
yield dict(zip(keys, line)) | Parse raw response as csv and return row object list. | Below is the instruction that describes the task:
### Input:
Parse raw response as csv and return row object list.
### Response:
def csv(self):
"""Parse raw response as csv and return row object list.
"""
lines = self._parsecsv(self.raw)
# set keys from header line (first line)
keys = next(lines)
for line in lines:
yield dict(zip(keys, line)) |
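_parsecsv is not shown; a plausible stand-in built on the standard library makes the generator's behavior concrete (header row first, then one dict per data row):
import csv as csv_module
import io

raw = 'name,count\nalpha,1\nbeta,2\n'
lines = csv_module.reader(io.StringIO(raw))  # plausible _parsecsv equivalent
keys = next(lines)                           # ['name', 'count']
rows = [dict(zip(keys, line)) for line in lines]
# rows == [{'name': 'alpha', 'count': '1'}, {'name': 'beta', 'count': '2'}]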
def getFrameNumber(g):
"""
Polls the data server to find the current frame number.
Throws an exception if it cannot determine it.
"""
if not g.cpars['hcam_server_on']:
raise DriverError('getFrameNumber error: servers are not active')
url = g.cpars['hipercam_server'] + 'status/DET.FRAM2.NO'
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=False)
try:
msg = rs.msg
except:
raise DriverError('getFrameNumber error: no message found')
try:
frame_no = int(msg.split()[1])
except:
raise DriverError('getFrameNumber error: invalid msg ' + msg)
return frame_no | Polls the data server to find the current frame number.
Throws an exception if it cannot determine it. | Below is the instruction that describes the task:
### Input:
Polls the data server to find the current frame number.
Throws an exception if it cannot determine it.
### Response:
def getFrameNumber(g):
"""
Polls the data server to find the current frame number.
Throws an exception if it cannot determine it.
"""
if not g.cpars['hcam_server_on']:
raise DriverError('getFrameNumber error: servers are not active')
url = g.cpars['hipercam_server'] + 'status/DET.FRAM2.NO'
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=False)
try:
msg = rs.msg
except:
raise DriverError('getFrameNumber error: no message found')
try:
frame_no = int(msg.split()[1])
except:
raise DriverError('getFrameNumber error: invalid msg ' + msg)
return frame_no |
def cast_conditionally(data: mx.sym.Symbol, dtype: str) -> mx.sym.Symbol:
"""
Workaround until no-op cast will be fixed in MXNet codebase.
Creates cast symbol only if dtype is different from default one, i.e. float32.
:param data: Input symbol.
:param dtype: Target dtype.
:return: Cast symbol or just data symbol.
"""
if dtype != C.DTYPE_FP32:
return mx.sym.cast(data=data, dtype=dtype)
return data | Workaround until no-op cast will be fixed in MXNet codebase.
Creates cast symbol only if dtype is different from default one, i.e. float32.
:param data: Input symbol.
:param dtype: Target dtype.
:return: Cast symbol or just data symbol. | Below is the instruction that describes the task:
### Input:
Workaround until no-op cast will be fixed in MXNet codebase.
Creates cast symbol only if dtype is different from default one, i.e. float32.
:param data: Input symbol.
:param dtype: Target dtype.
:return: Cast symbol or just data symbol.
### Response:
def cast_conditionally(data: mx.sym.Symbol, dtype: str) -> mx.sym.Symbol:
"""
Workaround until no-op cast will be fixed in MXNet codebase.
Creates cast symbol only if dtype is different from default one, i.e. float32.
:param data: Input symbol.
:param dtype: Target dtype.
:return: Cast symbol or just data symbol.
"""
if dtype != C.DTYPE_FP32:
return mx.sym.cast(data=data, dtype=dtype)
return data |
def _get_value(scikit_value, mode = 'regressor', scaling = 1.0, n_classes = 2, tree_index = 0):
""" Get the right value from the scikit-tree
"""
# Regression
if mode == 'regressor':
return scikit_value[0] * scaling
# Binary classification
if n_classes == 2:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0][1] * scaling / scikit_value[0].sum()
# boosted tree
else:
value = scikit_value[0][0] * scaling
if value == 0.5:
value = value - 1e-7
# Multiclass classification
else:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0] / scikit_value[0].sum()
# boosted tree
else:
value = {tree_index: scikit_value[0] * scaling}
return value | Get the right value from the scikit-tree | Below is the instruction that describes the task:
### Input:
Get the right value from the scikit-tree
### Response:
def _get_value(scikit_value, mode = 'regressor', scaling = 1.0, n_classes = 2, tree_index = 0):
""" Get the right value from the scikit-tree
"""
# Regression
if mode == 'regressor':
return scikit_value[0] * scaling
# Binary classification
if n_classes == 2:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0][1] * scaling / scikit_value[0].sum()
# boosted tree
else:
value = scikit_value[0][0] * scaling
if value == 0.5:
value = value - 1e-7
# Multiclass classification
else:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0] / scikit_value[0].sum()
# boosted tree
else:
value = {tree_index: scikit_value[0] * scaling}
return value |
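A worked example for the binary decision-tree branch (any non-'regressor' mode takes the classification path); with hypothetical leaf counts [30, 70], the result is the positive-class fraction times the scaling factor:
import numpy as np

scikit_value = np.array([[30.0, 70.0]])  # hypothetical class counts at a leaf
value = _get_value(scikit_value, mode='classifier', scaling=1.0, n_classes=2)
# len(scikit_value[0]) == 2, so the decision-tree branch runs:
# 70.0 * 1.0 / (30.0 + 70.0) == 0.7
assert value == 0.7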
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start) | - An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise. | Below is the instruction that describes the task:
### Input:
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
### Response:
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start) |
def reorder_mod(A, ci):
'''
This function reorders the connectivity matrix by modular structure and
may hence be useful in visualization of modular structure.
Parameters
----------
A : NxN np.ndarray
binary/weighted connectivity matrix
ci : Nx1 np.ndarray
module affiliation vector
Returns
-------
On : Nx1 np.ndarray
new node order
Ar : NxN np.ndarray
reordered connectivity matrix
'''
# TODO update function with 2015 changes
from scipy import stats
_, max_module_size = stats.mode(ci)
u, ci = np.unique(ci, return_inverse=True) # make consecutive
n = np.size(ci) # number of nodes
m = np.size(u) # number of modules
nm = np.zeros((m,)) # number of nodes in modules
knm = np.zeros((n, m)) # degree to other modules
for i in range(m):
nm[i] = np.size(np.where(ci == i))
knm[:, i] = np.sum(A[:, ci == i], axis=1)
am = np.zeros((m, m)) # relative intermodular connectivity
for i in range(m):
am[i, :] = np.sum(knm[ci == i, :], axis=0)
am /= np.outer(nm, nm)
# 1. Arrange densely connected modules together
# symmetrized intermodular connectivity
i, j = np.where(np.tril(am, -1) + 1)
s = (np.tril(am, -1) + 1)[i, j]
ord = np.argsort(s)[::-1] # sort by high relative connectivity
i = i[ord]
j = j[ord]
i += 1
j += 1 # fix off by 1 error so np.where doesn't
om = np.array((i[0], j[0])) # catch module 0
i[0] = 0
j[0] = 0
while len(om) < m: # while not all modules ordered
ui, = np.where(np.logical_and(
i, np.logical_or(j == om[0], j == om[-1])))
uj, = np.where(np.logical_and(
j, np.logical_or(i == om[0], i == om[-1])))
if np.size(ui):
ui = ui[0]
if np.size(uj):
uj = uj[0]
if ui == uj:
i[ui] = 0
j[uj] = 0
continue
if not np.size(ui):
ui = np.inf
if not np.size(uj):
uj = np.inf
if ui < uj:
old = j[ui]
new = i[ui]
if uj < ui:
old = i[uj]
new = j[uj]
if old == om[0]:
om = np.append((new,), om)
if old == om[-1]:
om = np.append(om, (new,))
i[i == old] = 0
j[j == old] = 0
print(om)
# 2. Reorder nodes within modules
on = np.zeros((n,), dtype=int)
for y, x in enumerate(om):
ind, = np.where(ci == x - 1) # indices
pos, = np.where(om == x) # position
# NOT DONE! OE NOES
mod_imp = np.array((om, np.sign(np.arange(m) - pos),
np.abs(np.arange(m) - pos), am[x - 1, om - 1])).T
print(np.shape((mod_imp[:, 3][::-1], mod_imp[:, 2])))
ix = np.lexsort((mod_imp[:, 3][::-1], mod_imp[:, 2]))
mod_imp = mod_imp[ix]
# at this point mod_imp agrees with the matlab version
signs = mod_imp[:, 1]
mod_imp = np.abs(mod_imp[:, 0] * mod_imp[:, 1])
mod_imp = np.append(mod_imp[1:], x)
mod_imp = np.array(mod_imp - 1, dtype=int)
print(mod_imp, signs)
# at this point mod_imp is the absolute value of that in the matlab
# version. this limitation comes from sortrows ability to deal with
# negative indices, which we would have to do manually.
# instead, i punt on its importance; i only bother to order by the
# principal dimension. some within-module orderings
# may potentially be a little bit out of order.
# ksmi=knm[ind,:].T[mod_imp[::-1]]
# reverse mod_imp to sort by the first column first and so on
# print ksmi
# for i,sin in enumerate(signs):
# if sin==-1:
# ksmi[i,:]=ksmi[i,:][::-1]
# print ksmi
# print np.shape(ksmi)
# ^ this is unworkable and wrong, lexsort alone cannot handle the
# negative indices problem of sortrows. you would pretty much need
# to rewrite sortrows to do lexsort plus negative indices; the algorithm
# can't be further simplified.
ord = np.lexsort(knm[np.ix_(ind, mod_imp[::-1])])
# ord=np.lexsort(knm[ind,:].T[mod_imp[::-1]])
if signs[mod_imp[0]] == -1:
ord = ord[::-1]
# reverse just the principal level and punt on the other levels.
# this will basically be fine for most purposes and probably won't
# ever show a difference for weighted graphs.
on[ind[ord]] = y * int(max_module_size) + \
np.arange(nm[x - 1], dtype=int)
on = np.argsort(on)
ar = A[np.ix_(on, on)]
return on, ar | This function reorders the connectivity matrix by modular structure and
may hence be useful in visualization of modular structure.
Parameters
----------
A : NxN np.ndarray
binary/weighted connectivity matrix
ci : Nx1 np.ndarray
module affiliation vector
Returns
-------
On : Nx1 np.ndarray
new node order
Ar : NxN np.ndarray
reordered connectivity matrix | Below is the instruction that describes the task:
### Input:
This function reorders the connectivity matrix by modular structure and
may hence be useful in visualization of modular structure.
Parameters
----------
A : NxN np.ndarray
binary/weighted connectivity matrix
ci : Nx1 np.ndarray
module affiliation vector
Returns
-------
On : Nx1 np.ndarray
new node order
Ar : NxN np.ndarray
reordered connectivity matrix
### Response:
def reorder_mod(A, ci):
'''
This function reorders the connectivity matrix by modular structure and
may hence be useful in visualization of modular structure.
Parameters
----------
A : NxN np.ndarray
binary/weighted connectivity matrix
ci : Nx1 np.ndarray
module affiliation vector
Returns
-------
On : Nx1 np.ndarray
new node order
Ar : NxN np.ndarray
reordered connectivity matrix
'''
# TODO update function with 2015 changes
from scipy import stats
_, max_module_size = stats.mode(ci)
u, ci = np.unique(ci, return_inverse=True) # make consecutive
n = np.size(ci) # number of nodes
m = np.size(u) # number of modules
nm = np.zeros((m,)) # number of nodes in modules
knm = np.zeros((n, m)) # degree to other modules
for i in range(m):
nm[i] = np.size(np.where(ci == i))
knm[:, i] = np.sum(A[:, ci == i], axis=1)
am = np.zeros((m, m)) # relative intermodular connectivity
for i in range(m):
am[i, :] = np.sum(knm[ci == i, :], axis=0)
am /= np.outer(nm, nm)
# 1. Arrange densely connected modules together
# symmetrized intermodular connectivity
i, j = np.where(np.tril(am, -1) + 1)
s = (np.tril(am, -1) + 1)[i, j]
ord = np.argsort(s)[::-1] # sort by high relative connectivity
i = i[ord]
j = j[ord]
i += 1
j += 1 # fix off by 1 error so np.where doesn't
om = np.array((i[0], j[0])) # catch module 0
i[0] = 0
j[0] = 0
while len(om) < m: # while not all modules ordered
ui, = np.where(np.logical_and(
i, np.logical_or(j == om[0], j == om[-1])))
uj, = np.where(np.logical_and(
j, np.logical_or(i == om[0], i == om[-1])))
if np.size(ui):
ui = ui[0]
if np.size(uj):
uj = uj[0]
if ui == uj:
i[ui] = 0
j[uj] = 0
continue
if not np.size(ui):
ui = np.inf
if not np.size(uj):
uj = np.inf
if ui < uj:
old = j[ui]
new = i[ui]
if uj < ui:
old = i[uj]
new = j[uj]
if old == om[0]:
om = np.append((new,), om)
if old == om[-1]:
om = np.append(om, (new,))
i[i == old] = 0
j[j == old] = 0
print(om)
# 2. Reorder nodes within modules
on = np.zeros((n,), dtype=int)
for y, x in enumerate(om):
ind, = np.where(ci == x - 1) # indices
pos, = np.where(om == x) # position
# NOT DONE! OE NOES
mod_imp = np.array((om, np.sign(np.arange(m) - pos),
np.abs(np.arange(m) - pos), am[x - 1, om - 1])).T
print(np.shape((mod_imp[:, 3][::-1], mod_imp[:, 2])))
ix = np.lexsort((mod_imp[:, 3][::-1], mod_imp[:, 2]))
mod_imp = mod_imp[ix]
# at this point mod_imp agrees with the matlab version
signs = mod_imp[:, 1]
mod_imp = np.abs(mod_imp[:, 0] * mod_imp[:, 1])
mod_imp = np.append(mod_imp[1:], x)
mod_imp = np.array(mod_imp - 1, dtype=int)
print(mod_imp, signs)
# at this point mod_imp is the absolute value of that in the matlab
# version. this limitation comes from sortrows ability to deal with
# negative indices, which we would have to do manually.
# instead, i punt on its importance; i only bother to order by the
# principal dimension. some within-module orderings
# may potentially be a little bit out of order.
# ksmi=knm[ind,:].T[mod_imp[::-1]]
# reverse mod_imp to sort by the first column first and so on
# print ksmi
# for i,sin in enumerate(signs):
# if sin==-1:
# ksmi[i,:]=ksmi[i,:][::-1]
# print ksmi
# print np.shape(ksmi)
# ^ this is unworkable and wrong, lexsort alone cannot handle the
# negative indices problem of sortrows. you would pretty much need
# to rewrite sortrows to do lexsort plus negative indices; the algorithm
# can't be further simplified.
ord = np.lexsort(knm[np.ix_(ind, mod_imp[::-1])])
# ord=np.lexsort(knm[ind,:].T[mod_imp[::-1]])
if signs[mod_imp[0]] == -1:
ord = ord[::-1]
# reverse just the principal level and punt on the other levels.
# this will basically be fine for most purposes and probably won't
# ever show a difference for weighted graphs.
on[ind[ord]] = y * int(max_module_size) + \
np.arange(nm[x - 1], dtype=int)
on = np.argsort(on)
ar = A[np.ix_(on, on)]
return on, ar |
def short_label(self):
"""str: A short description of the group.
>>> device.group.short_label
'Kitchen + 1'
"""
group_names = sorted([m.player_name for m in self.members])
group_label = group_names[0]
if len(group_names) > 1:
group_label += " + {}".format(len(group_names) - 1)
return group_label | str: A short description of the group.
>>> device.group.short_label
'Kitchen + 1' | Below is the instruction that describes the task:
### Input:
str: A short description of the group.
>>> device.group.short_label
'Kitchen + 1'
### Response:
def short_label(self):
"""str: A short description of the group.
>>> device.group.short_label
'Kitchen + 1'
"""
group_names = sorted([m.player_name for m in self.members])
group_label = group_names[0]
if len(group_names) > 1:
group_label += " + {}".format(len(group_names) - 1)
return group_label |
def holidays_set(self, year=None):
"Return a quick date index (set)"
return set([day for day, label in self.holidays(year)]) | Return a quick date index (set) | Below is the instruction that describes the task:
### Input:
Return a quick date index (set)
### Response:
def holidays_set(self, year=None):
"Return a quick date index (set)"
return set([day for day, label in self.holidays(year)]) |
def forwards(self, orm):
"Write your forwards methods here."
orm.Project.objects.update(label=F('name'))
orm.Cohort.objects.update(label=F('name'))
orm.Sample.objects.update(name=F('label')) | Write your forwards methods here. | Below is the instruction that describes the task:
### Input:
Write your forwards methods here.
### Response:
def forwards(self, orm):
"Write your forwards methods here."
orm.Project.objects.update(label=F('name'))
orm.Cohort.objects.update(label=F('name'))
orm.Sample.objects.update(name=F('label')) |
def serialize(self, data):
"""Return the data as serialized string.
:param dict data: The data to serialize
:rtype: str
"""
return json.dumps(self._serialize_datetime(data), ensure_ascii=False) | Return the data as serialized string.
:param dict data: The data to serialize
:rtype: str | Below is the instruction that describes the task:
### Input:
Return the data as serialized string.
:param dict data: The data to serialize
:rtype: str
### Response:
def serialize(self, data):
"""Return the data as serialized string.
:param dict data: The data to serialize
:rtype: str
"""
return json.dumps(self._serialize_datetime(data), ensure_ascii=False) |
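_serialize_datetime is referenced but not shown; a plausible sketch (hypothetical, not necessarily the library's actual helper) that recursively converts datetime objects to ISO-8601 strings so json.dumps can handle them:
import datetime

def _serialize_datetime(value):
    # Recursively replace datetime/date objects with ISO-8601 strings.
    if isinstance(value, (datetime.datetime, datetime.date)):
        return value.isoformat()
    if isinstance(value, dict):
        return {k: _serialize_datetime(v) for k, v in value.items()}
    if isinstance(value, list):
        return [_serialize_datetime(v) for v in value]
    return value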
def setVisible(self, state):
"""
Sets the visibility for this record box.
:param state | <bool>
"""
super(XOrbRecordBox, self).setVisible(state)
if state and not self._loaded:
if self.autoInitialize():
table = self.tableType()
if not table:
return
self.setRecords(table.select(where=self.query()))
else:
self.initialized.emit() | Sets the visibility for this record box.
:param state | <bool> | Below is the instruction that describes the task:
### Input:
Sets the visibility for this record box.
:param state | <bool>
### Response:
def setVisible(self, state):
"""
Sets the visibility for this record box.
:param state | <bool>
"""
super(XOrbRecordBox, self).setVisible(state)
if state and not self._loaded:
if self.autoInitialize():
table = self.tableType()
if not table:
return
self.setRecords(table.select(where=self.query()))
else:
self.initialized.emit() |
def get_niggli_reduced_lattice(self, tol: float = 1e-5) -> "Lattice":
"""
Get the Niggli reduced lattice using the numerically stable algo
proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams,
Acta Crystallographica Section A Foundations of Crystallography, 2003,
60(1), 1-6. doi:10.1107/S010876730302186X
Args:
tol (float): The numerical tolerance. The default of 1e-5 should
result in stable behavior for most cases.
Returns:
Niggli-reduced lattice.
"""
# lll reduction is more stable for skewed cells
matrix = self.lll_matrix
a = matrix[0]
b = matrix[1]
c = matrix[2]
e = tol * self.volume ** (1 / 3)
# Define metric tensor
G = [
[dot(a, a), dot(a, b), dot(a, c)],
[dot(a, b), dot(b, b), dot(b, c)],
[dot(a, c), dot(b, c), dot(c, c)],
]
G = np.array(G)
# This sets an upper limit on the number of iterations.
for count in range(100):
# The steps are labelled as Ax as per the labelling scheme in the
# paper.
(A, B, C, E, N, Y) = (
G[0, 0],
G[1, 1],
G[2, 2],
2 * G[1, 2],
2 * G[0, 2],
2 * G[0, 1],
)
if A > B + e or (abs(A - B) < e and abs(E) > abs(N) + e):
# A1
M = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]
G = dot(transpose(M), dot(G, M))
if (B > C + e) or (abs(B - C) < e and abs(N) > abs(Y) + e):
# A2
M = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]
G = dot(transpose(M), dot(G, M))
continue
l = 0 if abs(E) < e else E / abs(E)
m = 0 if abs(N) < e else N / abs(N)
n = 0 if abs(Y) < e else Y / abs(Y)
if l * m * n == 1:
# A3
i = -1 if l == -1 else 1
j = -1 if m == -1 else 1
k = -1 if n == -1 else 1
M = [[i, 0, 0], [0, j, 0], [0, 0, k]]
G = dot(transpose(M), dot(G, M))
elif l * m * n == 0 or l * m * n == -1:
# A4
i = -1 if l == 1 else 1
j = -1 if m == 1 else 1
k = -1 if n == 1 else 1
if i * j * k == -1:
if n == 0:
k = -1
elif m == 0:
j = -1
elif l == 0:
i = -1
M = [[i, 0, 0], [0, j, 0], [0, 0, k]]
G = dot(transpose(M), dot(G, M))
(A, B, C, E, N, Y) = (
G[0, 0],
G[1, 1],
G[2, 2],
2 * G[1, 2],
2 * G[0, 2],
2 * G[0, 1],
)
# A5
if (
abs(E) > B + e
or (abs(E - B) < e and 2 * N < Y - e)
or (abs(E + B) < e and Y < -e)
):
M = [[1, 0, 0], [0, 1, -E / abs(E)], [0, 0, 1]]
G = dot(transpose(M), dot(G, M))
continue
# A6
if (
abs(N) > A + e
or (abs(A - N) < e and 2 * E < Y - e)
or (abs(A + N) < e and Y < -e)
):
M = [[1, 0, -N / abs(N)], [0, 1, 0], [0, 0, 1]]
G = dot(transpose(M), dot(G, M))
continue
# A7
if (
abs(Y) > A + e
or (abs(A - Y) < e and 2 * E < N - e)
or (abs(A + Y) < e and N < -e)
):
M = [[1, -Y / abs(Y), 0], [0, 1, 0], [0, 0, 1]]
G = dot(transpose(M), dot(G, M))
continue
# A8
if E + N + Y + A + B < -e or (abs(E + N + Y + A + B) < e < Y + (A + N) * 2):
M = [[1, 0, 1], [0, 1, 1], [0, 0, 1]]
G = dot(transpose(M), dot(G, M))
continue
break
A = G[0, 0]
B = G[1, 1]
C = G[2, 2]
E = 2 * G[1, 2]
N = 2 * G[0, 2]
Y = 2 * G[0, 1]
a = math.sqrt(A)
b = math.sqrt(B)
c = math.sqrt(C)
alpha = math.acos(E / 2 / b / c) / math.pi * 180
beta = math.acos(N / 2 / a / c) / math.pi * 180
gamma = math.acos(Y / 2 / a / b) / math.pi * 180
latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
mapped = self.find_mapping(latt, e, skip_rotation_matrix=True)
if mapped is not None:
if np.linalg.det(mapped[0].matrix) > 0:
return mapped[0]
else:
return Lattice(-mapped[0].matrix)
raise ValueError("can't find niggli") | Get the Niggli reduced lattice using the numerically stable algo
proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams,
Acta Crystallographica Section A Foundations of Crystallography, 2003,
60(1), 1-6. doi:10.1107/S010876730302186X
Args:
tol (float): The numerical tolerance. The default of 1e-5 should
result in stable behavior for most cases.
Returns:
Niggli-reduced lattice. | Below is the instruction that describes the task:
### Input:
Get the Niggli reduced lattice using the numerically stable algo
proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams,
Acta Crystallographica Section A Foundations of Crystallography, 2003,
60(1), 1-6. doi:10.1107/S010876730302186X
Args:
tol (float): The numerical tolerance. The default of 1e-5 should
result in stable behavior for most cases.
Returns:
Niggli-reduced lattice.
### Response:
def get_niggli_reduced_lattice(self, tol: float = 1e-5) -> "Lattice":
"""
Get the Niggli reduced lattice using the numerically stable algo
proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams,
Acta Crystallographica Section A Foundations of Crystallography, 2003,
60(1), 1-6. doi:10.1107/S010876730302186X
Args:
tol (float): The numerical tolerance. The default of 1e-5 should
result in stable behavior for most cases.
Returns:
Niggli-reduced lattice.
"""
# lll reduction is more stable for skewed cells
matrix = self.lll_matrix
a = matrix[0]
b = matrix[1]
c = matrix[2]
e = tol * self.volume ** (1 / 3)
# Define metric tensor
G = [
[dot(a, a), dot(a, b), dot(a, c)],
[dot(a, b), dot(b, b), dot(b, c)],
[dot(a, c), dot(b, c), dot(c, c)],
]
G = np.array(G)
# This sets an upper limit on the number of iterations.
for count in range(100):
# The steps are labelled as Ax as per the labelling scheme in the
# paper.
(A, B, C, E, N, Y) = (
G[0, 0],
G[1, 1],
G[2, 2],
2 * G[1, 2],
2 * G[0, 2],
2 * G[0, 1],
)
if A > B + e or (abs(A - B) < e and abs(E) > abs(N) + e):
# A1
M = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]
G = dot(transpose(M), dot(G, M))
if (B > C + e) or (abs(B - C) < e and abs(N) > abs(Y) + e):
# A2
M = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]
G = dot(transpose(M), dot(G, M))
continue
l = 0 if abs(E) < e else E / abs(E)
m = 0 if abs(N) < e else N / abs(N)
n = 0 if abs(Y) < e else Y / abs(Y)
if l * m * n == 1:
# A3
i = -1 if l == -1 else 1
j = -1 if m == -1 else 1
k = -1 if n == -1 else 1
M = [[i, 0, 0], [0, j, 0], [0, 0, k]]
G = dot(transpose(M), dot(G, M))
elif l * m * n == 0 or l * m * n == -1:
# A4
i = -1 if l == 1 else 1
j = -1 if m == 1 else 1
k = -1 if n == 1 else 1
if i * j * k == -1:
if n == 0:
k = -1
elif m == 0:
j = -1
elif l == 0:
i = -1
M = [[i, 0, 0], [0, j, 0], [0, 0, k]]
G = dot(transpose(M), dot(G, M))
(A, B, C, E, N, Y) = (
G[0, 0],
G[1, 1],
G[2, 2],
2 * G[1, 2],
2 * G[0, 2],
2 * G[0, 1],
)
# A5
if (
abs(E) > B + e
or (abs(E - B) < e and 2 * N < Y - e)
or (abs(E + B) < e and Y < -e)
):
M = [[1, 0, 0], [0, 1, -E / abs(E)], [0, 0, 1]]
G = dot(transpose(M), dot(G, M))
continue
# A6
if (
abs(N) > A + e
or (abs(A - N) < e and 2 * E < Y - e)
or (abs(A + N) < e and Y < -e)
):
M = [[1, 0, -N / abs(N)], [0, 1, 0], [0, 0, 1]]
G = dot(transpose(M), dot(G, M))
continue
# A7
if (
abs(Y) > A + e
or (abs(A - Y) < e and 2 * E < N - e)
or (abs(A + Y) < e and N < -e)
):
M = [[1, -Y / abs(Y), 0], [0, 1, 0], [0, 0, 1]]
G = dot(transpose(M), dot(G, M))
continue
# A8
if E + N + Y + A + B < -e or (abs(E + N + Y + A + B) < e < Y + (A + N) * 2):
M = [[1, 0, 1], [0, 1, 1], [0, 0, 1]]
G = dot(transpose(M), dot(G, M))
continue
break
A = G[0, 0]
B = G[1, 1]
C = G[2, 2]
E = 2 * G[1, 2]
N = 2 * G[0, 2]
Y = 2 * G[0, 1]
a = math.sqrt(A)
b = math.sqrt(B)
c = math.sqrt(C)
alpha = math.acos(E / 2 / b / c) / math.pi * 180
beta = math.acos(N / 2 / a / c) / math.pi * 180
gamma = math.acos(Y / 2 / a / b) / math.pi * 180
latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
mapped = self.find_mapping(latt, e, skip_rotation_matrix=True)
if mapped is not None:
if np.linalg.det(mapped[0].matrix) > 0:
return mapped[0]
else:
return Lattice(-mapped[0].matrix)
raise ValueError("can't find niggli") |
def _soap_client_call(method_name, *args):
"""Wrapper to call SoapClient method"""
# a new client instance is built for threading issues
soap_client = _build_soap_client()
soap_args = _convert_soap_method_args(*args)
# if pysimplesoap version requires it, apply a workaround for
# https://github.com/pysimplesoap/pysimplesoap/issues/31
if PYSIMPLESOAP_1_16_2:
return getattr(soap_client, method_name)(*soap_args)
else:
return getattr(soap_client, method_name)(soap_client, *soap_args) | Wrapper to call SoapClient method | Below is the instruction that describes the task:
### Input:
Wrapper to call SoapClient method
### Response:
def _soap_client_call(method_name, *args):
"""Wrapper to call SoapClient method"""
# a new client instance is built for threading issues
soap_client = _build_soap_client()
soap_args = _convert_soap_method_args(*args)
# if pysimplesoap version requires it, apply a workaround for
# https://github.com/pysimplesoap/pysimplesoap/issues/31
if PYSIMPLESOAP_1_16_2:
return getattr(soap_client, method_name)(*soap_args)
else:
return getattr(soap_client, method_name)(soap_client, *soap_args) |
def row_cells(self, row_idx):
"""
Sequence of cells in the row at *row_idx* in this table.
"""
column_count = self._column_count
start = row_idx * column_count
end = start + column_count
return self._cells[start:end] | Sequence of cells in the row at *row_idx* in this table. | Below is the instruction that describes the task:
### Input:
Sequence of cells in the row at *row_idx* in this table.
### Response:
def row_cells(self, row_idx):
"""
Sequence of cells in the row at *row_idx* in this table.
"""
column_count = self._column_count
start = row_idx * column_count
end = start + column_count
return self._cells[start:end] |
def make_result_response(self):
"""Create result response for the a "get" or "set" iq stanza.
:return: new `Iq` object with the same "id" as self, "from" and "to"
attributes replaced and type="result".
:returntype: `Iq`"""
if self.stanza_type not in ("set", "get"):
raise ValueError("Results may only be generated for"
" 'set' or 'get' iq")
stanza = Iq(stanza_type = "result", from_jid = self.to_jid,
to_jid = self.from_jid, stanza_id = self.stanza_id)
return stanza | Create result response for a "get" or "set" iq stanza.
:return: new `Iq` object with the same "id" as self, "from" and "to"
attributes replaced and type="result".
:returntype: `Iq` | Below is the instruction that describes the task:
### Input:
Create result response for a "get" or "set" iq stanza.
:return: new `Iq` object with the same "id" as self, "from" and "to"
attributes replaced and type="result".
:returntype: `Iq`
### Response:
def make_result_response(self):
"""Create result response for the a "get" or "set" iq stanza.
:return: new `Iq` object with the same "id" as self, "from" and "to"
attributes replaced and type="result".
:returntype: `Iq`"""
if self.stanza_type not in ("set", "get"):
raise ValueError("Results may only be generated for"
" 'set' or 'get' iq")
stanza = Iq(stanza_type = "result", from_jid = self.to_jid,
to_jid = self.from_jid, stanza_id = self.stanza_id)
return stanza |
def infos(cls, fqdn):
""" Display information about hosted certificates for a fqdn. """
if isinstance(fqdn, (list, tuple)):
ids = []
for fqd_ in fqdn:
ids.extend(cls.infos(fqd_))
return ids
ids = cls.usable_id(fqdn)
if not ids:
return []
if not isinstance(ids, (list, tuple)):
ids = [ids]
return [cls.info(id_) for id_ in ids] | Display information about hosted certificates for a fqdn. | Below is the instruction that describes the task:
### Input:
Display information about hosted certificates for a fqdn.
### Response:
def infos(cls, fqdn):
""" Display information about hosted certificates for a fqdn. """
if isinstance(fqdn, (list, tuple)):
ids = []
for fqd_ in fqdn:
ids.extend(cls.infos(fqd_))
return ids
ids = cls.usable_id(fqdn)
if not ids:
return []
if not isinstance(ids, (list, tuple)):
ids = [ids]
return [cls.info(id_) for id_ in ids] |
def cache(ignore=None):
"""Decorator for memoizing a function using either the filesystem or a
database.
"""
def decorator(func):
# Initialize both cached versions
joblib_cached = constants.joblib_memory.cache(func, ignore=ignore)
db_cached = DbMemoizedFunc(func, ignore)
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Dynamically choose the cache at call-time, not at import."""
if func.__name__ == '_sia' and not config.CACHE_SIAS:
f = func
elif config.CACHING_BACKEND == 'fs':
f = joblib_cached
elif config.CACHING_BACKEND == 'db':
f = db_cached
return f(*args, **kwargs)
return wrapper
return decorator | Decorator for memoizing a function using either the filesystem or a
database. | Below is the instruction that describes the task:
### Input:
Decorator for memoizing a function using either the filesystem or a
database.
### Response:
def cache(ignore=None):
"""Decorator for memoizing a function using either the filesystem or a
database.
"""
def decorator(func):
# Initialize both cached versions
joblib_cached = constants.joblib_memory.cache(func, ignore=ignore)
db_cached = DbMemoizedFunc(func, ignore)
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Dynamically choose the cache at call-time, not at import."""
if func.__name__ == '_sia' and not config.CACHE_SIAS:
f = func
elif config.CACHING_BACKEND == 'fs':
f = joblib_cached
elif config.CACHING_BACKEND == 'db':
f = db_cached
return f(*args, **kwargs)
return wrapper
return decorator |
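A usage sketch, assuming the surrounding module's config and constants are initialized; because the backend is chosen at call time rather than at import time, flipping config.CACHING_BACKEND between 'fs' and 'db' redirects where later calls are memoized:
@cache(ignore=['verbose'])
def expensive(x, verbose=False):
    # 'verbose' is excluded from the cache key via `ignore`.
    return x ** 2

expensive(4)  # computed once, stored in the configured backend
expensive(4)  # served from the cache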
def point_displ(pt1, pt2):
""" Calculate the displacement vector between two n-D points.
pt2 - pt1
.. todo:: Complete point_displ docstring
"""
#Imports
import numpy as np
# Make iterable
if not np.iterable(pt1):
pt1 = np.float64(np.array([pt1]))
else:
pt1 = np.float64(np.array(pt1).squeeze())
## end if
if not np.iterable(pt2):
pt2 = np.float64(np.array([pt2]))
else:
pt2 = np.float64(np.array(pt2).squeeze())
## end if
# Calculate the displacement vector and return
displ = np.matrix(np.subtract(pt2, pt1)).reshape(3,1)
return displ | Calculate the displacement vector between two n-D points.
pt2 - pt1
.. todo:: Complete point_displ docstring | Below is the instruction that describes the task:
### Input:
Calculate the displacement vector between two n-D points.
pt2 - pt1
.. todo:: Complete point_displ docstring
### Response:
def point_displ(pt1, pt2):
""" Calculate the displacement vector between two n-D points.
pt2 - pt1
.. todo:: Complete point_displ docstring
"""
#Imports
import numpy as np
# Make iterable
if not np.iterable(pt1):
pt1 = np.float64(np.array([pt1]))
else:
pt1 = np.float64(np.array(pt1).squeeze())
## end if
if not np.iterable(pt2):
pt2 = np.float64(np.array([pt2]))
else:
pt2 = np.float64(np.array(pt2).squeeze())
## end if
# Calculate the displacement vector and return
displ = np.matrix(np.subtract(pt2, pt1)).reshape(3,1)
return displ |
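A worked example with 3-D points (note the hard-coded reshape(3, 1) limits the function to 3-D despite the n-D wording):
import numpy as np

displ = point_displ([1.0, 2.0, 3.0], [4.0, 6.0, 8.0])
# pt2 - pt1, returned as a 3x1 column matrix:
assert np.allclose(displ, np.matrix([[3.0], [4.0], [5.0]]))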
async def asgi_send(self, message: dict) -> None:
"""Called by the ASGI instance to send a message."""
if message["type"] == "http.response.start" and self.state == ASGIHTTPState.REQUEST:
self.response = message
elif message["type"] == "http.response.body" and self.state in {
ASGIHTTPState.REQUEST,
ASGIHTTPState.RESPONSE,
}:
if self.state == ASGIHTTPState.REQUEST:
headers = build_and_validate_headers(self.response["headers"])
headers.extend(self.response_headers())
await self.asend(
h11.Response(status_code=int(self.response["status"]), headers=headers)
)
self.state = ASGIHTTPState.RESPONSE
if (
not suppress_body(self.scope["method"], int(self.response["status"]))
and message.get("body", b"") != b""
):
await self.asend(h11.Data(data=bytes(message["body"])))
if not message.get("more_body", False):
if self.state != ASGIHTTPState.CLOSED:
await self.asend(h11.EndOfMessage())
await self.asgi_put({"type": "http.disconnect"})
self.state = ASGIHTTPState.CLOSED
else:
raise UnexpectedMessage(self.state, message["type"]) | Called by the ASGI instance to send a message. | Below is the instruction that describes the task:
### Input:
Called by the ASGI instance to send a message.
### Response:
async def asgi_send(self, message: dict) -> None:
"""Called by the ASGI instance to send a message."""
if message["type"] == "http.response.start" and self.state == ASGIHTTPState.REQUEST:
self.response = message
elif message["type"] == "http.response.body" and self.state in {
ASGIHTTPState.REQUEST,
ASGIHTTPState.RESPONSE,
}:
if self.state == ASGIHTTPState.REQUEST:
headers = build_and_validate_headers(self.response["headers"])
headers.extend(self.response_headers())
await self.asend(
h11.Response(status_code=int(self.response["status"]), headers=headers)
)
self.state = ASGIHTTPState.RESPONSE
if (
not suppress_body(self.scope["method"], int(self.response["status"]))
and message.get("body", b"") != b""
):
await self.asend(h11.Data(data=bytes(message["body"])))
if not message.get("more_body", False):
if self.state != ASGIHTTPState.CLOSED:
await self.asend(h11.EndOfMessage())
await self.asgi_put({"type": "http.disconnect"})
self.state = ASGIHTTPState.CLOSED
else:
raise UnexpectedMessage(self.state, message["type"]) |
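The two message types accepted above follow the standard ASGI HTTP specification; a sketch of what a framework would send for a plain 200 response (`protocol` names a hypothetical instance of the class above):

await protocol.asgi_send({
    'type': 'http.response.start',
    'status': 200,
    'headers': [(b'content-type', b'text/plain')],
})
await protocol.asgi_send({
    'type': 'http.response.body',
    'body': b'hello',
    'more_body': False,
})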
def digit(m: Union[int, pd.Series], n: int) -> Union[int, pd.Series]:
"""Returns the nth digit of each number in m."""
return (m // (10 ** n)) % 10 | Returns the nth digit of each number in m. | Below is the instruction that describes the task:
### Input:
Returns the nth digit of each number in m.
### Response:
def digit(m: Union[int, pd.Series], n: int) -> Union[int, pd.Series]:
"""Returns the nth digit of each number in m."""
return (m // (10 ** n)) % 10 |
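A few worked values; digit positions count from the right, starting at zero:

import pandas as pd
digit(1234, 0)                  # 4 (ones place)
digit(1234, 2)                  # 2 (hundreds place)
digit(pd.Series([10, 205]), 1)  # Series([1, 0]), the tens digits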
def process_item(self, item, spider):
"""
Process single item. Add item to items and then upload to S3 if size of items
>= max_chunk_size.
"""
self.items.append(item)
if len(self.items) >= self.max_chunk_size:
self._upload_chunk(spider)
return item | Process single item. Add item to items and then upload to S3 if size of items
>= max_chunk_size. | Below is the instruction that describes the task:
### Input:
Process single item. Add item to items and then upload to S3 if size of items
>= max_chunk_size.
### Response:
def process_item(self, item, spider):
"""
Process single item. Add item to items and then upload to S3 if size of items
>= max_chunk_size.
"""
self.items.append(item)
if len(self.items) >= self.max_chunk_size:
self._upload_chunk(spider)
return item |
def _get_handling_triplet(self, node_id):
"""_get_handling_triplet(node_id) -> (handler, value, attrs)"""
handler = self._get_handler(node_id)
value = self[node_id]
attrs = self._get_attrs(node_id)
return handler, value, attrs | _get_handling_triplet(node_id) -> (handler, value, attrs) | Below is the instruction that describes the task:
### Input:
_get_handling_triplet(node_id) -> (handler, value, attrs)
### Response:
def _get_handling_triplet(self, node_id):
"""_get_handling_triplet(node_id) -> (handler, value, attrs)"""
handler = self._get_handler(node_id)
value = self[node_id]
attrs = self._get_attrs(node_id)
return handler, value, attrs |
def get_now_utc():
''' date in UTC, ISO format'''
# Helper class for UTC time
# Source: http://stackoverflow.com/questions/2331592/datetime-datetime-utcnow-why-no-tzinfo
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
#now = datetime.datetime.now(timezone.utc) # Python 3.2
now = datetime.datetime.now(UTC())
return now | date in UTC, ISO format | Below is the instruction that describes the task:
### Input:
date in UTC, ISO format
### Response:
def get_now_utc():
''' date in UTC, ISO format'''
# Helper class for UTC time
# Source: http://stackoverflow.com/questions/2331592/datetime-datetime-utcnow-why-no-tzinfo
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
#now = datetime.datetime.now(timezone.utc) # Python 3.2
now = datetime.datetime.now(UTC())
return now |
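Usage is a single call; the result is timezone-aware, so isoformat() carries the +00:00 offset (the timestamp below is illustrative):

now = get_now_utc()
now.isoformat()  # e.g. '2019-01-01T12:00:00.000000+00:00'
# On Python 3.2+, datetime.datetime.now(datetime.timezone.utc) is equivalent.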
def count(self):
"""
If result is True, then the count will process the result set; if
result is False, then only the condition is used to count
"""
if self._group_by or self._join or self.distinct_field:
return self.do_(self.get_query().limit(None).order_by(None).offset(None).alias().count()).scalar()
else:
return self.do_(self.get_query().with_only_columns([func.count()]).limit(None).order_by(None).offset(None)).scalar() | If result is True, then the count will process the result set; if
result is False, then only the condition is used to count | Below is the instruction that describes the task:
### Input:
If result is True, then the count will process the result set; if
result is False, then only the condition is used to count
### Response:
def count(self):
"""
If result is True, then the count will process the result set; if
result is False, then only the condition is used to count
"""
if self._group_by or self._join or self.distinct_field:
return self.do_(self.get_query().limit(None).order_by(None).offset(None).alias().count()).scalar()
else:
return self.do_(self.get_query().with_only_columns([func.count()]).limit(None).order_by(None).offset(None)).scalar() |
def slice(filename, number_tiles=None, col=None, row=None, save=True):
"""
Split an image into a specified number of tiles.
Args:
filename (str): The filename of the image to split.
number_tiles (int): The number of tiles required.
Kwargs:
save (bool): Whether or not to save tiles to disk.
Returns:
Tuple of :class:`Tile` instances.
"""
im = Image.open(filename)
im_w, im_h = im.size
columns = 0
rows = 0
if number_tiles is not None:
validate_image(im, number_tiles)
columns, rows = calc_columns_rows(number_tiles)
extras = (columns * rows) - number_tiles
else:
validate_image_col_row(im, col, row)
columns = col
rows = row
extras = 0  # col and row were given explicitly, so there are no leftover tiles
tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))
tiles = []
number = 1
for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.
for pos_x in range(0, im_w - columns, tile_w): # as above.
area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
image = im.crop(area)
position = (int(floor(pos_x / tile_w)) + 1,
int(floor(pos_y / tile_h)) + 1)
coords = (pos_x, pos_y)
tile = Tile(image, number, position, coords)
tiles.append(tile)
number += 1
if save:
save_tiles(tiles,
prefix=get_basename(filename),
directory=os.path.dirname(filename))
return tuple(tiles) | Split an image into a specified number of tiles.
Args:
filename (str): The filename of the image to split.
number_tiles (int): The number of tiles required.
Kwargs:
save (bool): Whether or not to save tiles to disk.
Returns:
Tuple of :class:`Tile` instances. | Below is the instruction that describes the task:
### Input:
Split an image into a specified number of tiles.
Args:
filename (str): The filename of the image to split.
number_tiles (int): The number of tiles required.
Kwargs:
save (bool): Whether or not to save tiles to disk.
Returns:
Tuple of :class:`Tile` instances.
### Response:
def slice(filename, number_tiles=None, col=None, row=None, save=True):
"""
Split an image into a specified number of tiles.
Args:
filename (str): The filename of the image to split.
number_tiles (int): The number of tiles required.
Kwargs:
save (bool): Whether or not to save tiles to disk.
Returns:
Tuple of :class:`Tile` instances.
"""
im = Image.open(filename)
im_w, im_h = im.size
columns = 0
rows = 0
if number_tiles is not None:
validate_image(im, number_tiles)
columns, rows = calc_columns_rows(number_tiles)
extras = (columns * rows) - number_tiles
else:
validate_image_col_row(im, col, row)
columns = col
rows = row
extras = 0  # col and row were given explicitly, so there are no leftover tiles
tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))
tiles = []
number = 1
for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.
for pos_x in range(0, im_w - columns, tile_w): # as above.
area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
image = im.crop(area)
position = (int(floor(pos_x / tile_w)) + 1,
int(floor(pos_y / tile_h)) + 1)
coords = (pos_x, pos_y)
tile = Tile(image, number, position, coords)
tiles.append(tile)
number += 1
if save:
save_tiles(tiles,
prefix=get_basename(filename),
directory=os.path.dirname(filename))
return tuple(tiles) |
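A usage sketch; the filename is hypothetical, and attribute access assumes the Tile class exposes the number, position and coords it is constructed with:

tiles = slice('photo.png', number_tiles=4, save=False)
for t in tiles:
    print(t.number, t.position, t.coords)  # e.g. 1 (1, 1) (0, 0)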
def _feed_to_kafka(self, json_item):
"""Sends a request to Kafka
:param json_item: The json item to send
:returns: A boolean indicating whether the data was sent successfully or not
"""
@MethodTimer.timeout(self.settings['KAFKA_FEED_TIMEOUT'], False)
def _feed(json_item):
try:
self.logger.debug("Sending json to kafka at " +
str(self.settings['KAFKA_PRODUCER_TOPIC']))
future = self.producer.send(self.settings['KAFKA_PRODUCER_TOPIC'],
json_item)
future.add_callback(self._kafka_success)
future.add_errback(self._kafka_failure)
self.producer.flush()
return True
except Exception as e:
self.logger.error("Lost connection to Kafka")
self._spawn_kafka_connection_thread()
return False
return _feed(json_item) | Sends a request to Kafka
:param json_item: The json item to send
:returns: A boolean indicating whether the data was sent successfully or not | Below is the instruction that describes the task:
### Input:
Sends a request to Kafka
:param json_item: The json item to send
:returns: A boolean indicating whether the data was sent successfully or not
### Response:
def _feed_to_kafka(self, json_item):
"""Sends a request to Kafka
:param json_item: The json item to send
:returns: A boolean indicating whether the data was sent successfully or not
"""
@MethodTimer.timeout(self.settings['KAFKA_FEED_TIMEOUT'], False)
def _feed(json_item):
try:
self.logger.debug("Sending json to kafka at " +
str(self.settings['KAFKA_PRODUCER_TOPIC']))
future = self.producer.send(self.settings['KAFKA_PRODUCER_TOPIC'],
json_item)
future.add_callback(self._kafka_success)
future.add_errback(self._kafka_failure)
self.producer.flush()
return True
except Exception as e:
self.logger.error("Lost connection to Kafka")
self._spawn_kafka_connection_thread()
return False
return _feed(json_item) |
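The callback wiring above matches kafka-python's producer future API; a standalone sketch of the same pattern (broker address and topic are hypothetical):

from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers='localhost:9092')
future = producer.send('demo-topic', b'{"item": 1}')
future.add_callback(lambda md: print('delivered to', md.topic, md.offset))
future.add_errback(lambda exc: print('delivery failed:', exc))
producer.flush()  # block until outstanding messages are sent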
def update_function(self, param_vals):
"""Takes an array param_vals, updates function, returns the new error"""
self.model = self.func(param_vals, *self.func_args, **self.func_kwargs)
d = self.calc_residuals()
return np.dot(d.flat, d.flat) | Takes an array param_vals, updates function, returns the new error | Below is the instruction that describes the task:
### Input:
Takes an array param_vals, updates function, returns the new error
### Response:
def update_function(self, param_vals):
"""Takes an array param_vals, updates function, returns the new error"""
self.model = self.func(param_vals, *self.func_args, **self.func_kwargs)
d = self.calc_residuals()
return np.dot(d.flat, d.flat) |
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
) | Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order). | Below is the instruction that describes the task:
### Input:
Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
### Response:
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
) |
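A usage sketch via the module-level working set; the group and name are illustrative:

import pkg_resources
for ep in pkg_resources.iter_entry_points('console_scripts', name='pip'):
    print(ep.name, ep.module_name)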
def stellingwerf_pdm_theta(times, mags, errs, frequency,
binsize=0.05, minbin=9):
'''
This calculates the Stellingwerf PDM theta value at a test frequency.
Parameters
----------
times,mags,errs : np.array
The input time-series and associated errors.
frequency : float
The test frequency to calculate the theta statistic at.
binsize : float
The phase bin size to use.
minbin : int
The minimum number of items in a phase bin to consider in the
calculation of the statistic.
Returns
-------
theta_pdm : float
The value of the theta statistic at the specified `frequency`.
'''
period = 1.0/frequency
fold_time = times[0]
phased = phase_magseries(times,
mags,
period,
fold_time,
wrap=False,
sort=True)
phases = phased['phase']
pmags = phased['mags']
bins = nparange(0.0, 1.0, binsize)
binnedphaseinds = npdigitize(phases, bins)
binvariances = []
binndets = []
goodbins = 0
for x in npunique(binnedphaseinds):
thisbin_inds = binnedphaseinds == x
thisbin_mags = pmags[thisbin_inds]
if thisbin_mags.size > minbin:
thisbin_variance = npvar(thisbin_mags,ddof=1)
binvariances.append(thisbin_variance)
binndets.append(thisbin_mags.size)
goodbins = goodbins + 1
# now calculate theta
binvariances = nparray(binvariances)
binndets = nparray(binndets)
theta_top = npsum(binvariances*(binndets - 1)) / (npsum(binndets) -
goodbins)
theta_bot = npvar(pmags,ddof=1)
theta = theta_top/theta_bot
return theta | This calculates the Stellingwerf PDM theta value at a test frequency.
Parameters
----------
times,mags,errs : np.array
The input time-series and associated errors.
frequency : float
The test frequency to calculate the theta statistic at.
binsize : float
The phase bin size to use.
minbin : int
The minimum number of items in a phase bin to consider in the
calculation of the statistic.
Returns
-------
theta_pdm : float
The value of the theta statistic at the specified `frequency`. | Below is the instruction that describes the task:
### Input:
This calculates the Stellingwerf PDM theta value at a test frequency.
Parameters
----------
times,mags,errs : np.array
The input time-series and associated errors.
frequency : float
The test frequency to calculate the theta statistic at.
binsize : float
The phase bin size to use.
minbin : int
The minimum number of items in a phase bin to consider in the
calculation of the statistic.
Returns
-------
theta_pdm : float
The value of the theta statistic at the specified `frequency`.
### Response:
def stellingwerf_pdm_theta(times, mags, errs, frequency,
binsize=0.05, minbin=9):
'''
This calculates the Stellingwerf PDM theta value at a test frequency.
Parameters
----------
times,mags,errs : np.array
The input time-series and associated errors.
frequency : float
The test frequency to calculate the theta statistic at.
binsize : float
The phase bin size to use.
minbin : int
The minimum number of items in a phase bin to consider in the
calculation of the statistic.
Returns
-------
theta_pdm : float
The value of the theta statistic at the specified `frequency`.
'''
period = 1.0/frequency
fold_time = times[0]
phased = phase_magseries(times,
mags,
period,
fold_time,
wrap=False,
sort=True)
phases = phased['phase']
pmags = phased['mags']
bins = nparange(0.0, 1.0, binsize)
binnedphaseinds = npdigitize(phases, bins)
binvariances = []
binndets = []
goodbins = 0
for x in npunique(binnedphaseinds):
thisbin_inds = binnedphaseinds == x
thisbin_mags = pmags[thisbin_inds]
if thisbin_mags.size > minbin:
thisbin_variance = npvar(thisbin_mags,ddof=1)
binvariances.append(thisbin_variance)
binndets.append(thisbin_mags.size)
goodbins = goodbins + 1
# now calculate theta
binvariances = nparray(binvariances)
binndets = nparray(binndets)
theta_top = npsum(binvariances*(binndets - 1)) / (npsum(binndets) -
goodbins)
theta_bot = npvar(pmags,ddof=1)
theta = theta_top/theta_bot
return theta |
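Theta near 1 means the trial frequency adds no structure; values well below 1 flag a plausible period. A synthetic sanity check, assuming the module-level numpy aliases and phase_magseries used above are available (errs is accepted but unused by the statistic):

import numpy as np
t = np.sort(np.random.uniform(0.0, 30.0, 600))
m = np.sin(2.0 * np.pi * t / 2.5) + np.random.normal(0.0, 0.1, t.size)
theta_on = stellingwerf_pdm_theta(t, m, None, frequency=1.0 / 2.5)
theta_off = stellingwerf_pdm_theta(t, m, None, frequency=1.0 / 3.7)
# expect theta_on << theta_off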
def _compute_output_layer_expected(self):
"""Compute output layers expected that the IF will produce.
Be careful when you call this function. It's a private function, better
to use the public function `output_layers_expected()`.
:return: List of expected layer keys.
:rtype: list
"""
# Actually, an IF can produce maximum 6 layers, by default.
expected = [
layer_purpose_exposure_summary['key'], # 1
layer_purpose_aggregate_hazard_impacted['key'], # 2
layer_purpose_aggregation_summary['key'], # 3
layer_purpose_analysis_impacted['key'], # 4
layer_purpose_exposure_summary_table['key'], # 5
layer_purpose_profiling['key'], # 6
]
if is_raster_layer(self.exposure):
if self.exposure.keywords.get('layer_mode') == 'continuous':
# If the exposure is a continuous raster, we can't provide the
# exposure impacted layer.
expected.remove(layer_purpose_exposure_summary['key'])
if not self.exposure.keywords.get('classification'):
# If the exposure doesn't have a classification, such as population
# census layer, we can't provide an exposure breakdown layer.
expected.remove(layer_purpose_exposure_summary_table['key'])
# We add any layers produced by pre-processors
for preprocessor in self._preprocessors:
if preprocessor['output'].get('type') == 'layer':
expected.append(preprocessor['output'].get('value')['key'])
return expected | Compute the output layers that the IF is expected to produce.
Be careful when you call this function. It's a private function, better
to use the public function `output_layers_expected()`.
:return: List of expected layer keys.
:rtype: list | Below is the instruction that describes the task:
### Input:
Compute the output layers that the IF is expected to produce.
Be careful when you call this function. It's a private function, better
to use the public function `output_layers_expected()`.
:return: List of expected layer keys.
:rtype: list
### Response:
def _compute_output_layer_expected(self):
"""Compute output layers expected that the IF will produce.
Be careful when you call this function. It's a private function, better
to use the public function `output_layers_expected()`.
:return: List of expected layer keys.
:rtype: list
"""
# Actually, an IF can produce a maximum of 6 layers by default.
expected = [
layer_purpose_exposure_summary['key'], # 1
layer_purpose_aggregate_hazard_impacted['key'], # 2
layer_purpose_aggregation_summary['key'], # 3
layer_purpose_analysis_impacted['key'], # 4
layer_purpose_exposure_summary_table['key'], # 5
layer_purpose_profiling['key'], # 6
]
if is_raster_layer(self.exposure):
if self.exposure.keywords.get('layer_mode') == 'continuous':
# If the exposure is a continuous raster, we can't provide the
# exposure impacted layer.
expected.remove(layer_purpose_exposure_summary['key'])
if not self.exposure.keywords.get('classification'):
# If the exposure doesn't have a classification, such as population
# census layer, we can't provide an exposure breakdown layer.
expected.remove(layer_purpose_exposure_summary_table['key'])
# We add any layers produced by pre-processors
for preprocessor in self._preprocessors:
if preprocessor['output'].get('type') == 'layer':
expected.append(preprocessor['output'].get('value')['key'])
return expected |
def list_upgrades(refresh=True, **kwargs):
'''
List all available package upgrades.
.. versionadded:: 2018.3.0
refresh
Whether or not to refresh the package database before installing.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
pkgs = {}
for pkg in sorted(list_pkgs(refresh=refresh).keys()):
# NOTE: we already optionally refreshed in the list_pkgs call
pkg_upgrade = latest_version(pkg, refresh=False)
if pkg_upgrade:
pkgs[pkg] = pkg_upgrade
return pkgs | List all available package upgrades.
.. versionadded:: 2018.3.0
refresh
Whether or not to refresh the package database before installing.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades | Below is the instruction that describes the task:
### Input:
List all available package upgrades.
.. versionadded:: 2018.3.0
refresh
Whether or not to refresh the package database before installing.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
### Response:
def list_upgrades(refresh=True, **kwargs):
'''
List all available package upgrades.
.. versionadded:: 2018.3.0
refresh
Whether or not to refresh the package database before installing.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
pkgs = {}
for pkg in sorted(list_pkgs(refresh=refresh).keys()):
# NOTE: we already optionally refreshed in the list_pkgs call
pkg_upgrade = latest_version(pkg, refresh=False)
if pkg_upgrade:
pkgs[pkg] = pkg_upgrade
return pkgs |
def get_pathext(default_pathext=None):
"""Returns the path extensions from environment or a default"""
if default_pathext is None:
default_pathext = os.pathsep.join([ '.COM', '.EXE', '.BAT', '.CMD' ])
pathext = os.environ.get('PATHEXT', default_pathext)
return pathext | Returns the path extensions from environment or a default | Below is the instruction that describes the task:
### Input:
Returns the path extensions from environment or a default
### Response:
def get_pathext(default_pathext=None):
"""Returns the path extensions from environment or a default"""
if default_pathext is None:
default_pathext = os.pathsep.join([ '.COM', '.EXE', '.BAT', '.CMD' ])
pathext = os.environ.get('PATHEXT', default_pathext)
return pathext |
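Typical use splits the result on the platform path separator; the values shown are the built-in defaults when PATHEXT is unset:

import os
exts = get_pathext().split(os.pathsep)
# ['.COM', '.EXE', '.BAT', '.CMD']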
def targeted_conjugate_about(tensor: np.ndarray,
target: np.ndarray,
indices: Sequence[int],
conj_indices: Optional[Sequence[int]] = None,
buffer: Optional[np.ndarray] = None,
out: Optional[np.ndarray] = None) -> np.ndarray:
r"""Conjugates the given tensor about the target tensor.
This method computes a target tensor conjugated by another tensor.
Here conjugate is used in the sense of conjugating by a matrix, i.e.
A conjugated about B is $A B A^\dagger$ where $\dagger$ represents the
conjugate transpose.
Abstractly this computes $A \cdot B \cdot A^\dagger$ where A and B are
multi-dimensional arrays, and instead of matrix multiplication $\cdot$
is a contraction between the given indices (indices for first $\cdot$,
conj_indices for second $\cdot$).
More specifically this computes
sum tensor_{i_0,...,i_{r-1},j_0,...,j_{r-1}}
* target_{k_0,...,k_{r-1},l_0,...,l_{r-1}}
* tensor_{m_0,...,m_{r-1},n_0,...,n_{r-1}}^*
where the sum is over indices where j_s = k_s and s is in `indices`
and l_s = m_s and s is in `conj_indices`.
Args:
tensor: The tensor that will be conjugated about the target tensor.
target: The tensor that will receive the conjugation.
indices: The indices which will be contracted between the tensor and
target.
conj_indices: The indices which will be contracted between the
complex conjugate of the tensor and the target. If this is None,
then these will be the values in indices plus half the number
of dimensions of the target (`ndim`). This is the most common case
and corresponds to the case where the target is an operator on
a n-dimensional tensor product space (here `n` would be `ndim`).
buffer: A buffer to store partial results in. If not specified or None,
a new buffer is used.
out: The buffer to store the results in. If not specified or None, a new
buffer is used. Must have the same shape as target.
Returns:
The result of the conjugation.
"""
conj_indices = conj_indices or [i + target.ndim // 2 for i in indices]
first_multiply = targeted_left_multiply(tensor, target, indices, out=buffer)
return targeted_left_multiply(np.conjugate(tensor),
first_multiply,
conj_indices,
out=out) | r"""Conjugates the given tensor about the target tensor.
This method computes a target tensor conjugated by another tensor.
Here conjugate is used in the sense of conjugating by a matrix, i.e.
A conjugated about B is $A B A^\dagger$ where $\dagger$ represents the
conjugate transpose.
Abstractly this computes $A \cdot B \cdot A^\dagger$ where A and B are
multi-dimensional arrays, and instead of matrix multiplication $\cdot$
is a contraction between the given indices (indices for first $\cdot$,
conj_indices for second $\cdot$).
More specifically this computes
sum tensor_{i_0,...,i_{r-1},j_0,...,j_{r-1}}
* target_{k_0,...,k_{r-1},l_0,...,l_{r-1}}
* tensor_{m_0,...,m_{r-1},n_0,...,n_{r-1}}^*
where the sum is over indices where j_s = k_s and s is in `indices`
and l_s = m_s and s is in `conj_indices`.
Args:
tensor: The tensor that will be conjugated about the target tensor.
target: The tensor that will receive the conjugation.
indices: The indices which will be contracted between the tensor and
target.
conj_indices: The indices which will be contracted between the
complex conjugate of the tensor and the target. If this is None,
then these will be the values in indices plus half the number
of dimensions of the target (`ndim`). This is the most common case
and corresponds to the case where the target is an operator on
a n-dimensional tensor product space (here `n` would be `ndim`).
buffer: A buffer to store partial results in. If not specified or None,
a new buffer is used.
out: The buffer to store the results in. If not specified or None, a new
buffer is used. Must have the same shape as target.
Returns:
The result of the conjugation. | Below is the instruction that describes the task:
### Input:
r"""Conjugates the given tensor about the target tensor.
This method computes a target tensor conjugated by another tensor.
Here conjugate is used in the sense of conjugating by a matrix, i.e.
A conjugated about B is $A B A^\dagger$ where $\dagger$ represents the
conjugate transpose.
Abstractly this computes $A \cdot B \cdot A^\dagger$ where A and B are
multi-dimensional arrays, and instead of matrix multiplication $\cdot$
is a contraction between the given indices (indices for first $\cdot$,
conj_indices for second $\cdot$).
More specifically this computes
sum tensor_{i_0,...,i_{r-1},j_0,...,j_{r-1}}
* target_{k_0,...,k_{r-1},l_0,...,l_{r-1}}
* tensor_{m_0,...,m_{r-1},n_0,...,n_{r-1}}^*
where the sum is over indices where j_s = k_s and s is in `indices`
and l_s = m_s and s is in `conj_indices`.
Args:
tensor: The tensor that will be conjugated about the target tensor.
target: The tensor that will receive the conjugation.
indices: The indices which will be contracted between the tensor and
target.
conj_indices; The indices which will be contracted between the
complex conjugate of the tensor and the target. If this is None,
then these will be the values in indices plus half the number
of dimensions of the target (`ndim`). This is the most common case
and corresponds to the case where the target is an operator on
a n-dimensional tensor product space (here `n` would be `ndim`).
buffer: A buffer to store partial results in. If not specified or None,
a new buffer is used.
out: The buffer to store the results in. If not specified or None, a new
buffer is used. Must have the same shape as target.
Returns:
The result of the conjugation.
### Response:
def targeted_conjugate_about(tensor: np.ndarray,
target: np.ndarray,
indices: Sequence[int],
conj_indices: Optional[Sequence[int]] = None,
buffer: Optional[np.ndarray] = None,
out: Optional[np.ndarray] = None) -> np.ndarray:
r"""Conjugates the given tensor about the target tensor.
This method computes a target tensor conjugated by another tensor.
Here conjugate is used in the sense of conjugating by a matrix, i.e.
A conjugated about B is $A B A^\dagger$ where $\dagger$ represents the
conjugate transpose.
Abstractly this computes $A \cdot B \cdot A^\dagger$ where A and B are
multi-dimensional arrays, and instead of matrix multiplication $\cdot$
is a contraction between the given indices (indices for first $\cdot$,
conj_indices for second $\cdot$).
More specifically this computes
sum tensor_{i_0,...,i_{r-1},j_0,...,j_{r-1}}
* target_{k_0,...,k_{r-1},l_0,...,l_{r-1}}
* tensor_{m_0,...,m_{r-1},n_0,...,n_{r-1}}^*
where the sum is over indices where j_s = k_s and s is in `indices`
and l_s = m_s and s is in `conj_indices`.
Args:
tensor: The tensor that will be conjugated about the target tensor.
target: The tensor that will receive the conjugation.
indices: The indices which will be contracted between the tensor and
target.
conj_indices: The indices which will be contracted between the
complex conjugate of the tensor and the target. If this is None,
then these will be the values in indices plus half the number
of dimensions of the target (`ndim`). This is the most common case
and corresponds to the case where the target is an operator on
a n-dimensional tensor product space (here `n` would be `ndim`).
buffer: A buffer to store partial results in. If not specified or None,
a new buffer is used.
out: The buffer to store the results in. If not specified or None, a new
buffer is used. Must have the same shape as target.
Returns:
The result of the conjugation.
"""
conj_indices = conj_indices or [i + target.ndim // 2 for i in indices]
first_multiply = targeted_left_multiply(tensor, target, indices, out=buffer)
return targeted_left_multiply(np.conjugate(tensor),
first_multiply,
conj_indices,
out=out) |
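For a single qubit the operation reduces to ordinary matrix conjugation, which gives a quick correctness check (assuming targeted_left_multiply performs the contractions described above):

import numpy as np
u = np.array([[0, 1], [1, 0]], dtype=complex)    # Pauli X
rho = np.array([[1, 0], [0, 0]], dtype=complex)  # |0><0|
out = targeted_conjugate_about(u, rho, indices=[0])
np.allclose(out, u @ rho @ u.conj().T)           # True: X|0><0|X = |1><1|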
def _updateNumbers(self, linenumers):
"""
add/remove line numbers
"""
b = self.blockCount()
c = b - linenumers
if c > 0:
# remove lines numbers
for _ in range(c):
# remove last line:
self.setFocus()
storeCursorPos = self.textCursor()
self.moveCursor(
QtGui.QTextCursor.End,
QtGui.QTextCursor.MoveAnchor)
self.moveCursor(
QtGui.QTextCursor.StartOfLine,
QtGui.QTextCursor.MoveAnchor)
self.moveCursor(
QtGui.QTextCursor.End,
QtGui.QTextCursor.KeepAnchor)
self.textCursor().removeSelectedText()
self.textCursor().deletePreviousChar()
self.setTextCursor(storeCursorPos)
elif c < 0:
# add line numbers
for i in range(-c):
self.appendPlainText(str(b + i + 1)) | add/remove line numbers | Below is the instruction that describes the task:
### Input:
add/remove line numbers
### Response:
def _updateNumbers(self, linenumers):
"""
add/remove line numbers
"""
b = self.blockCount()
c = b - linenumers
if c > 0:
# remove lines numbers
for _ in range(c):
# remove last line:
self.setFocus()
storeCursorPos = self.textCursor()
self.moveCursor(
QtGui.QTextCursor.End,
QtGui.QTextCursor.MoveAnchor)
self.moveCursor(
QtGui.QTextCursor.StartOfLine,
QtGui.QTextCursor.MoveAnchor)
self.moveCursor(
QtGui.QTextCursor.End,
QtGui.QTextCursor.KeepAnchor)
self.textCursor().removeSelectedText()
self.textCursor().deletePreviousChar()
self.setTextCursor(storeCursorPos)
elif c < 0:
# add line numbers
for i in range(-c):
self.appendPlainText(str(b + i + 1)) |
def find_object(self, username, secret, domain=None, host_ip=None, service_id=None):
"""
Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id.
"""
# Not sure yet if this is advisable... Older passwords can be overwritten...
search = Credential.search()
search = search.filter("term", username=username)
search = search.filter("term", secret=secret)
if domain:
search = search.filter("term", domain=domain)
else:
search = search.exclude("exists", field="domain")
if host_ip:
search = search.filter("term", host_ip=host_ip)
else:
search = search.exclude("exists", field="host_ip")
if service_id:
search = search.filter("term", service_id=service_id)
else:
search = search.exclude("exists", field="service_id")
if search.count():
result = search[0].execute()[0]
return result
else:
return None | Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id. | Below is the instruction that describes the task:
### Input:
Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id.
### Response:
def find_object(self, username, secret, domain=None, host_ip=None, service_id=None):
"""
Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id.
"""
# Not sure yet if this is advisable... Older passwords can be overwritten...
search = Credential.search()
search = search.filter("term", username=username)
search = search.filter("term", secret=secret)
if domain:
search = search.filter("term", domain=domain)
else:
search = search.exclude("exists", field="domain")
if host_ip:
search = search.filter("term", host_ip=host_ip)
else:
search = search.exclude("exists", field="host_ip")
if service_id:
search = search.filter("term", service_id=service_id)
else:
search = search.exclude("exists", field="service_id")
if search.count():
result = search[0].execute()[0]
return result
else:
return None |
def get_instance(self, payload):
"""
Build an instance of StyleSheetInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
"""
return StyleSheetInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], ) | Build an instance of StyleSheetInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance | Below is the instruction that describes the task:
### Input:
Build an instance of StyleSheetInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of StyleSheetInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
"""
return StyleSheetInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], ) |
def mag_discrepancy(RAW_IMU, ATTITUDE, inclination, declination=None):
'''give the magnitude of the discrepancy between observed and expected magnetic field'''
if declination is None:
import mavutil
declination = degrees(mavutil.mavfile_global.param('COMPASS_DEC', 0))
expected = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
mag = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
return degrees(expected.angle(mag)) | give the magnitude of the discrepancy between observed and expected magnetic field | Below is the instruction that describes the task:
### Input:
give the magnitude of the discrepancy between observed and expected magnetic field
### Response:
def mag_discrepancy(RAW_IMU, ATTITUDE, inclination, declination=None):
'''give the magnitude of the discrepancy between observed and expected magnetic field'''
if declination is None:
import mavutil
declination = degrees(mavutil.mavfile_global.param('COMPASS_DEC', 0))
expected = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
mag = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
return degrees(expected.angle(mag)) |
def metaseries_description_metadata(description):
"""Return metatata from MetaSeries image description as dict."""
if not description.startswith('<MetaData>'):
raise ValueError('invalid MetaSeries image description')
from xml.etree import cElementTree as etree # delayed import
root = etree.fromstring(description)
types = {'float': float, 'int': int,
'bool': lambda x: asbool(x, 'on', 'off')}
def parse(root, result):
# recursive
for child in root:
attrib = child.attrib
if not attrib:
result[child.tag] = parse(child, {})
continue
if 'id' in attrib:
i = attrib['id']
t = attrib['type']
v = attrib['value']
if t in types:
result[i] = types[t](v)
else:
result[i] = v
return result
adict = parse(root, {})
if 'Description' in adict:
adict['Description'] = adict['Description'].replace(' ', '\n')
return adict | Return metadata from MetaSeries image description as dict. | Below is the instruction that describes the task:
### Input:
Return metadata from MetaSeries image description as dict.
### Response:
def metaseries_description_metadata(description):
"""Return metatata from MetaSeries image description as dict."""
if not description.startswith('<MetaData>'):
raise ValueError('invalid MetaSeries image description')
from xml.etree import cElementTree as etree # delayed import
root = etree.fromstring(description)
types = {'float': float, 'int': int,
'bool': lambda x: asbool(x, 'on', 'off')}
def parse(root, result):
# recursive
for child in root:
attrib = child.attrib
if not attrib:
result[child.tag] = parse(child, {})
continue
if 'id' in attrib:
i = attrib['id']
t = attrib['type']
v = attrib['value']
if t in types:
result[i] = types[t](v)
else:
result[i] = v
return result
adict = parse(root, {})
if 'Description' in adict:
adict['Description'] = adict['Description'].replace(' ', '\n')
return adict |
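A toy description string showing the shape of the parsed result; real MetaSeries headers carry many more properties:

desc = ('<MetaData>'
        '<prop id="Exposure" type="float" value="0.25"/>'
        '<prop id="Binning" type="int" value="2"/>'
        '</MetaData>')
metaseries_description_metadata(desc)
# {'Exposure': 0.25, 'Binning': 2}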
def get_assessments(self):
"""Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
return: (osid.assessment.AssessmentList) - a list of
``Assessments``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy) | Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
return: (osid.assessment.AssessmentList) - a list of
``Assessments``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
return: (osid.assessment.AssessmentList) - a list of
``Assessments``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_assessments(self):
"""Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
return: (osid.assessment.AssessmentList) - a list of
``Assessments``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy) |
def simulate_system(self, parameters, initial_conditions, timepoints,
max_moment_order=1, number_of_processes=1):
"""
Perform Gillespie SSA simulations and return trajectories for each species.
Each trajectory is interpolated at the given time points.
By default, the average amounts of species for all simulations are returned.
:param parameters: list of the initial values for the constants in the model.
Must be in the same order as in the model
:param initial_conditions: List of the initial values for the equations in the problem.
Must be in the same order as these equations occur.
:param timepoints: A list of time points to simulate the system for
:param number_of_processes: the number of parallel processes to be run
:param max_moment_order: the highest moment order to calculate the trajectories to.
if set to zero, the individual trajectories will be returned, instead of
the averaged moments.
E.g. a value of one will return means; a value of two returns means, variances and covariances; and so on.
:return: a list of :class:`~means.simulation.Trajectory` one per species in the problem,
or a list of lists of trajectories (one per simulation) if `return_average == False`.
:rtype: list[:class:`~means.simulation.Trajectory`]
"""
max_moment_order = int(max_moment_order)
assert(max_moment_order >= 0)
n_simulations = self.__n_simulations
self._validate_parameters(parameters, initial_conditions)
t_max= max(timepoints)
substitution_pairs = dict(zip(self.__problem.parameters, parameters))
propensities = substitute_all(self.__problem.propensities, substitution_pairs)
# lambdify the propensities for fast evaluation
propensities_as_function = self.__problem.propensities_as_function
def f(*species_parameters):
return propensities_as_function(*(np.concatenate((species_parameters, parameters))))
population_rates_as_function = f
if not self.__random_seed:
seed_for_processes = [None] * n_simulations
else:
seed_for_processes = [i for i in range(self.__random_seed, n_simulations + self.__random_seed)]
if number_of_processes ==1:
ssa_generator = _SSAGenerator(population_rates_as_function,
self.__problem.change, self.__problem.species,
initial_conditions, t_max, seed=self.__random_seed)
results = map(ssa_generator.generate_single_simulation, seed_for_processes)
else:
p = multiprocessing.Pool(number_of_processes,
initializer=multiprocessing_pool_initialiser,
initargs=[population_rates_as_function, self.__problem.change,
self.__problem.species,
initial_conditions, t_max, self.__random_seed])
results = p.map(multiprocessing_apply_ssa, seed_for_processes)
p.close()
p.join()
resampled_results = [[traj.resample(timepoints, extrapolate=True) for traj in res] for res in results]
for i in resampled_results:
idx = len(i[0].values) - 1
if max_moment_order == 0:
# Return a list of TrajectoryCollection objects
return map(TrajectoryCollection, resampled_results)
moments = self._compute_moments(resampled_results, max_moment_order)
return TrajectoryCollection(moments) | Perform Gillespie SSA simulations and return trajectories for each species.
Each trajectory is interpolated at the given time points.
By default, the average amounts of species for all simulations are returned.
:param parameters: list of the initial values for the constants in the model.
Must be in the same order as in the model
:param initial_conditions: List of the initial values for the equations in the problem.
Must be in the same order as these equations occur.
:param timepoints: A list of time points to simulate the system for
:param number_of_processes: the number of parallel processes to be run
:param max_moment_order: the highest moment order to calculate the trajectories to.
if set to zero, the individual trajectories will be returned, instead of
the averaged moments.
E.g. a value of one will return means; a value of two returns means, variances and covariances; and so on.
:return: a list of :class:`~means.simulation.Trajectory` one per species in the problem,
or a list of lists of trajectories (one per simulation) if `return_average == False`.
:rtype: list[:class:`~means.simulation.Trajectory`] | Below is the instruction that describes the task:
### Input:
Perform Gillespie SSA simulations and return trajectories for each species.
Each trajectory is interpolated at the given time points.
By default, the average amounts of species for all simulations are returned.
:param parameters: list of the initial values for the constants in the model.
Must be in the same order as in the model
:param initial_conditions: List of the initial values for the equations in the problem.
Must be in the same order as these equations occur.
:param timepoints: A list of time points to simulate the system for
:param number_of_processes: the number of parallel processes to be run
:param max_moment_order: the highest moment order to calculate the trajectories to.
if set to zero, the individual trajectories will be returned, instead of
the averaged moments.
E.g. a value of one will return means; a value of two returns means, variances and covariances; and so on.
:return: a list of :class:`~means.simulation.Trajectory` one per species in the problem,
or a list of lists of trajectories (one per simulation) if `return_average == False`.
:rtype: list[:class:`~means.simulation.Trajectory`]
### Response:
def simulate_system(self, parameters, initial_conditions, timepoints,
max_moment_order=1, number_of_processes=1):
"""
Perform Gillespie SSA simulations and return trajectories for each species.
Each trajectory is interpolated at the given time points.
By default, the average amounts of species for all simulations are returned.
:param parameters: list of the initial values for the constants in the model.
Must be in the same order as in the model
:param initial_conditions: List of the initial values for the equations in the problem.
Must be in the same order as these equations occur.
:param timepoints: A list of time points to simulate the system for
:param number_of_processes: the number of parallel processes to be run
:param max_moment_order: the highest moment order to calculate the trajectories to.
if set to zero, the individual trajectories will be returned, instead of
the averaged moments.
E.g. a value of one will return means; a value of two returns means, variances and covariances; and so on.
:return: a list of :class:`~means.simulation.Trajectory` one per species in the problem,
or a list of lists of trajectories (one per simulation) if `return_average == False`.
:rtype: list[:class:`~means.simulation.Trajectory`]
"""
max_moment_order = int(max_moment_order)
assert(max_moment_order >= 0)
n_simulations = self.__n_simulations
self._validate_parameters(parameters, initial_conditions)
t_max= max(timepoints)
substitution_pairs = dict(zip(self.__problem.parameters, parameters))
propensities = substitute_all(self.__problem.propensities, substitution_pairs)
# lambdify the propensities for fast evaluation
propensities_as_function = self.__problem.propensities_as_function
def f(*species_parameters):
return propensities_as_function(*(np.concatenate((species_parameters, parameters))))
population_rates_as_function = f
if not self.__random_seed:
seed_for_processes = [None] * n_simulations
else:
seed_for_processes = [i for i in range(self.__random_seed, n_simulations + self.__random_seed)]
if number_of_processes ==1:
ssa_generator = _SSAGenerator(population_rates_as_function,
self.__problem.change, self.__problem.species,
initial_conditions, t_max, seed=self.__random_seed)
results = map(ssa_generator.generate_single_simulation, seed_for_processes)
else:
p = multiprocessing.Pool(number_of_processes,
initializer=multiprocessing_pool_initialiser,
initargs=[population_rates_as_function, self.__problem.change,
self.__problem.species,
initial_conditions, t_max, self.__random_seed])
results = p.map(multiprocessing_apply_ssa, seed_for_processes)
p.close()
p.join()
resampled_results = [[traj.resample(timepoints, extrapolate=True) for traj in res] for res in results]
for i in resampled_results:
idx = len(i[0].values) - 1
if max_moment_order == 0:
# Return a list of TrajectoryCollection objects
return map(TrajectoryCollection, resampled_results)
moments = self._compute_moments(resampled_results, max_moment_order)
return TrajectoryCollection(moments) |
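A hedged usage sketch; the wrapper class name, model and parameter values are hypothetical, but the call matches the method signature above:

import numpy as np
ssa = SSASimulation(problem, n_simulations=100)  # hypothetical constructor
means = ssa.simulate_system(parameters=[0.1, 0.05],
                            initial_conditions=[20.0],
                            timepoints=np.arange(0.0, 40.0, 0.5),
                            max_moment_order=1,
                            number_of_processes=4)
# means is a TrajectoryCollection of first-order moments, one per species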
def _login(session):
"""Login to UPS."""
resp = session.get(LOGIN_URL, params=_get_params(session.auth.locale))
parsed = BeautifulSoup(resp.text, HTML_PARSER)
csrf = parsed.find(CSRF_FIND_TAG, CSRF_FIND_ATTR).get(VALUE_ATTR)
resp = session.post(LOGIN_URL, {
'userID': session.auth.username,
'password': session.auth.password,
'loginAction': 'X',
'CSRFToken': csrf,
'loc': session.auth.locale
})
if resp.status_code == 403:
raise UPSError('login failure')
parsed = BeautifulSoup(resp.text, HTML_PARSER)
error = parsed.find(ERROR_FIND_TAG, ERROR_FIND_ATTR)
if error and error.string:
raise UPSError(error.string.strip())
_save_cookies(session.cookies, session.auth.cookie_path) | Login to UPS. | Below is the instruction that describes the task:
### Input:
Login to UPS.
### Response:
def _login(session):
"""Login to UPS."""
resp = session.get(LOGIN_URL, params=_get_params(session.auth.locale))
parsed = BeautifulSoup(resp.text, HTML_PARSER)
csrf = parsed.find(CSRF_FIND_TAG, CSRF_FIND_ATTR).get(VALUE_ATTR)
resp = session.post(LOGIN_URL, {
'userID': session.auth.username,
'password': session.auth.password,
'loginAction': 'X',
'CSRFToken': csrf,
'loc': session.auth.locale
})
if resp.status_code == 403:
raise UPSError('login failure')
parsed = BeautifulSoup(resp.text, HTML_PARSER)
error = parsed.find(ERROR_FIND_TAG, ERROR_FIND_ATTR)
if error and error.string:
raise UPSError(error.string.strip())
_save_cookies(session.cookies, session.auth.cookie_path) |
def add_prefix(self, ncname: str) -> None:
""" Look up ncname and add it to the prefix map if necessary
@param ncname: name to add
"""
if ncname not in self.prefixmap:
uri = cu.expand_uri(ncname + ':', self.curi_maps)
if uri and '://' in uri:
self.prefixmap[ncname] = uri
else:
print(f"Unrecognized prefix: {ncname}", file=sys.stderr)
self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/" | Look up ncname and add it to the prefix map if necessary
@param ncname: name to add | Below is the instruction that describes the task:
### Input:
Look up ncname and add it to the prefix map if necessary
@param ncname: name to add
### Response:
def add_prefix(self, ncname: str) -> None:
""" Look up ncname and add it to the prefix map if necessary
@param ncname: name to add
"""
if ncname not in self.prefixmap:
uri = cu.expand_uri(ncname + ':', self.curi_maps)
if uri and '://' in uri:
self.prefixmap[ncname] = uri
else:
print(f"Unrecognized prefix: {ncname}", file=sys.stderr)
self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/" |
def get(self, sid):
"""
Constructs a FeedbackSummaryContext
:param sid: A string that uniquely identifies this feedback summary resource
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryContext
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryContext
"""
return FeedbackSummaryContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) | Constructs a FeedbackSummaryContext
:param sid: A string that uniquely identifies this feedback summary resource
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryContext
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryContext | Below is the instruction that describes the task:
### Input:
Constructs a FeedbackSummaryContext
:param sid: A string that uniquely identifies this feedback summary resource
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryContext
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryContext
### Response:
def get(self, sid):
"""
Constructs a FeedbackSummaryContext
:param sid: A string that uniquely identifies this feedback summary resource
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryContext
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryContext
"""
return FeedbackSummaryContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) |
def end_workunit(self, workunit):
"""Implementation of Reporter callback."""
duration = workunit.duration()
timing = '{:.3f}'.format(duration)
unaccounted_time = ''
# Background work may be idle a lot, no point in reporting that as unaccounted.
if self.is_under_main_root(workunit):
unaccounted_time_secs = workunit.unaccounted_time()
if unaccounted_time_secs >= 1 and unaccounted_time_secs > 0.05 * duration:
unaccounted_time = '{:.3f}'.format(unaccounted_time_secs)
status = HtmlReporter._outcome_css_classes[workunit.outcome()]
if workunit.has_label(WorkUnitLabel.TOOL):
self._emit(self._end_tool_invocation_fmt_string.format(
id=workunit.id,
status=status
))
self._emit(self._end_workunit_fmt_string.format(
id=workunit.id,
status=status,
timing=timing,
unaccounted_time=unaccounted_time,
aborted='true' if workunit.outcome() == WorkUnit.ABORTED else 'false'
))
# If we're a root workunit, force an overwrite, as we may be the last ever write in this run.
force_overwrite = workunit.parent is None
# Update the timings.
def render_timings(timings):
timings_dict = timings.get_all()
for item in timings_dict:
item['timing_string'] = '{:.3f}'.format(item['timing'])
res = ['<table>']
for item in timings_dict:
res.append("""<tr><td class="timing-string">{timing:.3f}</td>
<td class="timing-label">{label}""".format(
timing=item['timing'],
label=item['label']
))
if item['is_tool']:
res.append("""<i class="icon-cog"></i>""")
res.append("""</td></tr>""")
res.append('</table>')
return ''.join(res)
self._overwrite('cumulative_timings',
lambda: render_timings(self.run_tracker.cumulative_timings),
force=force_overwrite)
self._overwrite('self_timings',
lambda: render_timings(self.run_tracker.self_timings),
force=force_overwrite)
# Update the artifact cache stats.
def render_cache_stats(artifact_cache_stats):
def fix_detail_id(e, _id):
return e if isinstance(e, string_types) else e + (_id, )
msg_elements = []
for cache_name, stat in artifact_cache_stats.stats_per_cache.items():
# TODO consider display causes for hit/miss targets
hit_targets = [tgt for tgt, cause in stat.hit_targets]
miss_targets = [tgt for tgt, cause in stat.miss_targets]
msg_elements.extend([
cache_name + ' artifact cache: ',
# Explicitly set the detail ids, so their displayed/hidden state survives a refresh.
fix_detail_id(items_to_report_element(hit_targets, 'hit'), 'cache-hit-details'),
', ',
fix_detail_id(items_to_report_element(miss_targets, 'miss'), 'cache-miss-details'),
'.'
])
if not msg_elements:
msg_elements = ['No artifact cache use.']
return self._render_message(*msg_elements)
self._overwrite('artifact_cache_stats',
lambda: render_cache_stats(self.run_tracker.artifact_cache_stats),
force=force_overwrite)
for f in self._output_files[workunit.id].values():
f.close() | Implementation of Reporter callback. | Below is the instruction that describes the task:
### Input:
Implementation of Reporter callback.
### Response:
def end_workunit(self, workunit):
"""Implementation of Reporter callback."""
duration = workunit.duration()
timing = '{:.3f}'.format(duration)
unaccounted_time = ''
# Background work may be idle a lot, no point in reporting that as unaccounted.
if self.is_under_main_root(workunit):
unaccounted_time_secs = workunit.unaccounted_time()
if unaccounted_time_secs >= 1 and unaccounted_time_secs > 0.05 * duration:
unaccounted_time = '{:.3f}'.format(unaccounted_time_secs)
status = HtmlReporter._outcome_css_classes[workunit.outcome()]
if workunit.has_label(WorkUnitLabel.TOOL):
self._emit(self._end_tool_invocation_fmt_string.format(
id=workunit.id,
status=status
))
self._emit(self._end_workunit_fmt_string.format(
id=workunit.id,
status=status,
timing=timing,
unaccounted_time=unaccounted_time,
aborted='true' if workunit.outcome() == WorkUnit.ABORTED else 'false'
))
# If we're a root workunit, force an overwrite, as we may be the last ever write in this run.
force_overwrite = workunit.parent is None
# Update the timings.
def render_timings(timings):
timings_dict = timings.get_all()
for item in timings_dict:
item['timing_string'] = '{:.3f}'.format(item['timing'])
res = ['<table>']
for item in timings_dict:
res.append("""<tr><td class="timing-string">{timing:.3f}</td>
<td class="timing-label">{label}""".format(
timing=item['timing'],
label=item['label']
))
if item['is_tool']:
res.append("""<i class="icon-cog"></i>""")
res.append("""</td></tr>""")
res.append('</table>')
return ''.join(res)
self._overwrite('cumulative_timings',
lambda: render_timings(self.run_tracker.cumulative_timings),
force=force_overwrite)
self._overwrite('self_timings',
lambda: render_timings(self.run_tracker.self_timings),
force=force_overwrite)
# Update the artifact cache stats.
def render_cache_stats(artifact_cache_stats):
def fix_detail_id(e, _id):
return e if isinstance(e, string_types) else e + (_id, )
msg_elements = []
for cache_name, stat in artifact_cache_stats.stats_per_cache.items():
# TODO consider display causes for hit/miss targets
hit_targets = [tgt for tgt, cause in stat.hit_targets]
miss_targets = [tgt for tgt, cause in stat.miss_targets]
msg_elements.extend([
cache_name + ' artifact cache: ',
# Explicitly set the detail ids, so their displayed/hidden state survives a refresh.
fix_detail_id(items_to_report_element(hit_targets, 'hit'), 'cache-hit-details'),
', ',
fix_detail_id(items_to_report_element(miss_targets, 'miss'), 'cache-miss-details'),
'.'
])
if not msg_elements:
msg_elements = ['No artifact cache use.']
return self._render_message(*msg_elements)
self._overwrite('artifact_cache_stats',
lambda: render_cache_stats(self.run_tracker.artifact_cache_stats),
force=force_overwrite)
for f in self._output_files[workunit.id].values():
f.close() |
def get_iso_time(date_part, time_part):
r"""Combign date and time into an iso datetime."""
str_date = datetime.datetime.strptime(
date_part, '%m/%d/%Y').strftime('%Y-%m-%d')
str_time = datetime.datetime.strptime(
time_part, '%I:%M %p').strftime('%H:%M:%S')
return str_date + "T" + str_time + "-7:00" | r"""Combign date and time into an iso datetime. | Below is the the instruction that describes the task:
### Input:
r"""Combign date and time into an iso datetime.
### Response:
def get_iso_time(date_part, time_part):
r"""Combign date and time into an iso datetime."""
str_date = datetime.datetime.strptime(
date_part, '%m/%d/%Y').strftime('%Y-%m-%d')
str_time = datetime.datetime.strptime(
time_part, '%I:%M %p').strftime('%H:%M:%S')
return str_date + "T" + str_time + "-7:00" |
def parse_request_body_response(self, body, scope=None, **kwargs):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
These responses are json encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
self.token = parse_token_response(body, scope=scope)
self.populate_token_attributes(self.token)
return self.token | Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
These responses are json encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1 | Below is the instruction that describes the task:
### Input:
Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
These responses are json encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
### Response:
def parse_request_body_response(self, body, scope=None, **kwargs):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
These responses are json encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
self.token = parse_token_response(body, scope=scope)
self.populate_token_attributes(self.token)
return self.token |
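A usage sketch, assuming the method above is oauthlib's `Client.parse_request_body_response` (the client id and token body are placeholders):

```python
from oauthlib.oauth2 import WebApplicationClient

client = WebApplicationClient('my-client-id')  # hypothetical client id
body = '{"access_token": "abc123", "token_type": "Bearer", "expires_in": 3600}'
token = client.parse_request_body_response(body, scope=['read'])
print(token['access_token'])  # 'abc123'
```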
def setup_axes(rows=1, cols=1, figsize=(8, 6), expand=True, tight_layout=None, **kwargs):
"""
Sets up a figure of size *figsize* with a number of rows (*rows*) and columns (*cols*). \*\*kwargs passed through to :meth:`matplotlib.figure.Figure.add_subplot`.
.. versionadded:: 1.2
Parameters
----------
rows : int
Number of rows to create.
cols : int
Number of columns to create.
figsize : tuple
Size of figure to create.
expand : bool
Make the entire figure with size `figsize`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure.
axes : :class:`numpy.ndarray`
An array of all of the axes. (Unless there's only one axis, in which case it returns an instance of :class:`matplotlib.axes.Axes`.)
"""
if expand:
figsize = (figsize[0]*cols, figsize[1]*rows)
figargs = {}
if isinstance(tight_layout, dict):
figargs["tight_layout"] = tight_layout
elif tight_layout == "pdf":
figargs["tight_layout"] = {"rect": (0, 0, 1, 0.95)}
dpi = kwargs.pop('dpi', None)
fig, gs = _setup_figure(rows=rows, cols=cols, figsize=figsize, dpi=dpi, **figargs)
axes = _np.empty(shape=(rows, cols), dtype=object)
for i in range(rows):
for j in range(cols):
axes[i, j] = fig.add_subplot(gs[i, j], **kwargs)
if axes.shape == (1, 1):
return fig, axes[0, 0]
else:
return fig, axes | Sets up a figure of size *figsize* with a number of rows (*rows*) and columns (*cols*). \*\*kwargs passed through to :meth:`matplotlib.figure.Figure.add_subplot`.
.. versionadded:: 1.2
Parameters
----------
rows : int
Number of rows to create.
cols : int
Number of columns to create.
figsize : tuple
Size of figure to create.
expand : bool
Make the entire figure with size `figsize`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure.
axes : :class:`numpy.ndarray`
An array of all of the axes. (Unless there's only one axis, in which case it returns an instance of :class:`matplotlib.axes.Axes`.) | Below is the instruction that describes the task:
### Input:
Sets up a figure of size *figsize* with a number of rows (*rows*) and columns (*cols*). \*\*kwargs passed through to :meth:`matplotlib.figure.Figure.add_subplot`.
.. versionadded:: 1.2
Parameters
----------
rows : int
Number of rows to create.
cols : int
Number of columns to create.
figsize : tuple
Size of figure to create.
expand : bool
Make the entire figure with size `figsize`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure.
axes : :class:`numpy.ndarray`
An array of all of the axes. (Unless there's only one axis, in which case it returns an instance of :class:`matplotlib.axes.Axes`.)
### Response:
def setup_axes(rows=1, cols=1, figsize=(8, 6), expand=True, tight_layout=None, **kwargs):
"""
Sets up a figure of size *figsize* with a number of rows (*rows*) and columns (*cols*). \*\*kwargs passed through to :meth:`matplotlib.figure.Figure.add_subplot`.
.. versionadded:: 1.2
Parameters
----------
rows : int
Number of rows to create.
cols : int
Number of columns to create.
figsize : tuple
Size of figure to create.
expand : bool
Make the entire figure with size `figsize`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure.
axes : :class:`numpy.ndarray`
An array of all of the axes. (Unless there's only one axis, in which case it returns an instance of :class:`matplotlib.axes.Axes`.)
"""
if expand:
figsize = (figsize[0]*cols, figsize[1]*rows)
figargs = {}
if isinstance(tight_layout, dict):
figargs["tight_layout"] = tight_layout
elif tight_layout == "pdf":
figargs["tight_layout"] = {"rect": (0, 0, 1, 0.95)}
dpi = kwargs.pop('dpi', None)
fig, gs = _setup_figure(rows=rows, cols=cols, figsize=figsize, dpi=dpi, **figargs)
axes = _np.empty(shape=(rows, cols), dtype=object)
for i in range(rows):
for j in range(cols):
axes[i, j] = fig.add_subplot(gs[i, j], **kwargs)
if axes.shape == (1, 1):
return fig, axes[0, 0]
else:
return fig, axes |
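A minimal sketch of calling the helper above, assuming its module-level dependencies (`numpy as _np` and the private `_setup_figure`) are importable:

```python
# 2x3 grid; with expand=True each panel is 4x3 inches, so the figure is 12x6.
fig, axes = setup_axes(rows=2, cols=3, figsize=(4, 3))
axes[0, 0].plot([0, 1], [0, 1])  # axes is a 2x3 ndarray of Axes objects
fig.savefig('grid.png')
```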
def check_dihedral(self, construction_table):
"""Checks, if the dihedral defining atom is colinear.
Checks for each index starting from the third row of the
``construction_table``, if the reference atoms are colinear.
Args:
construction_table (pd.DataFrame):
Returns:
list: A list of problematic indices.
"""
c_table = construction_table
angles = self.get_angle_degrees(c_table.iloc[3:, :].values)
problem_index = np.nonzero((175 < angles) | (angles < 5))[0]
rename = dict(enumerate(c_table.index[3:]))
problem_index = [rename[i] for i in problem_index]
return problem_index | Checks if the dihedral defining atom is collinear.
Checks for each index starting from the third row of the
``construction_table`` whether the reference atoms are collinear.
Args:
construction_table (pd.DataFrame):
Returns:
list: A list of problematic indices. | Below is the instruction that describes the task:
### Input:
Checks if the dihedral defining atom is collinear.
Checks for each index starting from the third row of the
``construction_table`` whether the reference atoms are collinear.
Args:
construction_table (pd.DataFrame):
Returns:
list: A list of problematic indices.
### Response:
def check_dihedral(self, construction_table):
"""Checks, if the dihedral defining atom is colinear.
Checks for each index starting from the third row of the
``construction_table``, if the reference atoms are colinear.
Args:
construction_table (pd.DataFrame):
Returns:
list: A list of problematic indices.
"""
c_table = construction_table
angles = self.get_angle_degrees(c_table.iloc[3:, :].values)
problem_index = np.nonzero((175 < angles) | (angles < 5))[0]
rename = dict(enumerate(c_table.index[3:]))
problem_index = [rename[i] for i in problem_index]
return problem_index |
def add_personalization(self, personalization, index=0):
"""Add a Personaliztion object
:param personalizations: Add a Personalization object
:type personalizations: Personalization
:param index: The index where to add the Personalization
:type index: int
"""
self._personalizations = self._ensure_append(
personalization, self._personalizations, index) | Add a Personalization object
:param personalization: The Personalization object to add
:type personalizations: Personalization
:param index: The index where to add the Personalization
:type index: int | Below is the instruction that describes the task:
### Input:
Add a Personalization object
:param personalization: The Personalization object to add
:type personalizations: Personalization
:param index: The index where to add the Personalization
:type index: int
### Response:
def add_personalization(self, personalization, index=0):
"""Add a Personaliztion object
:param personalizations: Add a Personalization object
:type personalizations: Personalization
:param index: The index where to add the Personalization
:type index: int
"""
self._personalizations = self._ensure_append(
personalization, self._personalizations, index) |
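A short usage sketch, assuming this method lives on the SendGrid v3 `Mail` helper (the address is a placeholder):

```python
from sendgrid.helpers.mail import Email, Mail, Personalization

mail = Mail()
p = Personalization()
p.add_to(Email('to@example.com'))  # placeholder recipient
mail.add_personalization(p)        # inserted at index 0 by default
```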
def normalizeURL(url):
"""Normalize a URL, converting normalization failures to
DiscoveryFailure"""
try:
normalized = urinorm.urinorm(url)
except ValueError as why:
raise DiscoveryFailure('Normalizing identifier: %s' % (why, ), None)
else:
return urllib.parse.urldefrag(normalized)[0] | Normalize a URL, converting normalization failures to
DiscoveryFailure | Below is the instruction that describes the task:
### Input:
Normalize a URL, converting normalization failures to
DiscoveryFailure
### Response:
def normalizeURL(url):
"""Normalize a URL, converting normalization failures to
DiscoveryFailure"""
try:
normalized = urinorm.urinorm(url)
except ValueError as why:
raise DiscoveryFailure('Normalizing identifier: %s' % (why, ), None)
else:
return urllib.parse.urldefrag(normalized)[0] |
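For example, assuming `urinorm` performs standard URI normalization (lowercasing the scheme and host, among other steps):

```python
print(normalizeURL('HTTP://Example.COM/path#section'))
# -> 'http://example.com/path'  (case normalized, fragment stripped)
```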
def symmetric_difference(self, other):
"""Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA
"""
operation = bool.__xor__
self.cross_product(other, operation)
return self | Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA | Below is the instruction that describes the task:
### Input:
Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA
### Response:
def symmetric_difference(self, other):
"""Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA
"""
operation = bool.__xor__
self.cross_product(other, operation)
return self |
def get_create_batch_env_fun(batch_env_fn, time_limit):
"""Factory for dopamine environment initialization function.
Args:
batch_env_fn: function(in_graph: bool) -> batch environment.
time_limit: time steps limit for environment.
Returns:
function (with optional, unused parameters) initializing environment.
"""
def create_env_fun(game_name=None, sticky_actions=None):
del game_name, sticky_actions
batch_env = batch_env_fn(in_graph=False)
batch_env = ResizeBatchObservation(batch_env) # pylint: disable=redefined-variable-type
batch_env = DopamineBatchEnv(batch_env, max_episode_steps=time_limit)
return batch_env
return create_env_fun | Factory for dopamine environment initialization function.
Args:
batch_env_fn: function(in_graph: bool) -> batch environment.
time_limit: time steps limit for environment.
Returns:
function (with optional, unused parameters) initializing environment. | Below is the instruction that describes the task:
### Input:
Factory for dopamine environment initialization function.
Args:
batch_env_fn: function(in_graph: bool) -> batch environment.
time_limit: time steps limit for environment.
Returns:
function (with optional, unused parameters) initializing environment.
### Response:
def get_create_batch_env_fun(batch_env_fn, time_limit):
"""Factory for dopamine environment initialization function.
Args:
batch_env_fn: function(in_graph: bool) -> batch environment.
time_limit: time steps limit for environment.
Returns:
function (with optional, unused parameters) initializing environment.
"""
def create_env_fun(game_name=None, sticky_actions=None):
del game_name, sticky_actions
batch_env = batch_env_fn(in_graph=False)
batch_env = ResizeBatchObservation(batch_env) # pylint: disable=redefined-variable-type
batch_env = DopamineBatchEnv(batch_env, max_episode_steps=time_limit)
return batch_env
return create_env_fun |
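A wiring sketch; `my_batch_env_fn` and `make_batch_env` are hypothetical stand-ins for a real batch-environment constructor:

```python
def my_batch_env_fn(in_graph):
    # in_graph=False requests the plain (non-TensorFlow) variant.
    return make_batch_env(in_graph=in_graph)  # assumed constructor

create_env_fun = get_create_batch_env_fun(my_batch_env_fn, time_limit=1000)
env = create_env_fun()  # game_name/sticky_actions are accepted but ignored
```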
def remove_event_subscriber(self, name, ws):
"""
Remove a websocket subscriber from an event.
name -- name of the event
ws -- the websocket
"""
if name in self.available_events and \
ws in self.available_events[name]['subscribers']:
self.available_events[name]['subscribers'].remove(ws) | Remove a websocket subscriber from an event.
name -- name of the event
ws -- the websocket | Below is the instruction that describes the task:
### Input:
Remove a websocket subscriber from an event.
name -- name of the event
ws -- the websocket
### Response:
def remove_event_subscriber(self, name, ws):
"""
Remove a websocket subscriber from an event.
name -- name of the event
ws -- the websocket
"""
if name in self.available_events and \
ws in self.available_events[name]['subscribers']:
self.available_events[name]['subscribers'].remove(ws) |
def chunks(self, size=32, alignment=1):
"""Iterate over all segments and return chunks of the data aligned as
given by `alignment`. `size` must be a multiple of
`alignment`. Each chunk is returned as a named two-tuple of
its address and data.
"""
if (size % alignment) != 0:
raise Error(
'size {} is not a multiple of alignment {}'.format(
size,
alignment))
for segment in self:
for chunk in segment.chunks(size, alignment):
yield chunk | Iterate over all segments and return chunks of the data aligned as
given by `alignment`. `size` must be a multiple of
`alignment`. Each chunk is returned as a named two-tuple of
its address and data. | Below is the instruction that describes the task:
### Input:
Iterate over all segments and return chunks of the data aligned as
given by `alignment`. `size` must be a multiple of
`alignment`. Each chunk is returned as a named two-tuple of
its address and data.
### Response:
def chunks(self, size=32, alignment=1):
"""Iterate over all segments and return chunks of the data aligned as
given by `alignment`. `size` must be a multiple of
`alignment`. Each chunk is returned as a named two-tuple of
its address and data.
"""
if (size % alignment) != 0:
raise Error(
'size {} is not a multiple of alignment {}'.format(
size,
alignment))
for segment in self:
for chunk in segment.chunks(size, alignment):
yield chunk |
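A consumption sketch, assuming this is a segment container in the style of `bincopy`, where each yielded chunk exposes `address` and `data`:

```python
# segments: a hypothetical instance of the container defining chunks() above
for chunk in segments.chunks(size=16, alignment=4):
    print(hex(chunk.address), chunk.data.hex())
```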
def setRecords(self, records):
"""
Manually sets the list of records that will be displayed in this tree.
This is a shortcut method for creating a RecordSet with a list of records
and assigning it to the tree.
:param records | [<orb.Table>, ..]
"""
self._searchTerms = ''
if not isinstance(records, RecordSet):
records = RecordSet(records)
self.setRecordSet(records) | Manually sets the list of records that will be displayed in this tree.
This is a shortcut method for creating a RecordSet with a list of records
and assigning it to the tree.
:param records | [<orb.Table>, ..] | Below is the instruction that describes the task:
### Input:
Manually sets the list of records that will be displayed in this tree.
This is a shortcut method for creating a RecordSet with a list of records
and assigning it to the tree.
:param records | [<orb.Table>, ..]
### Response:
def setRecords(self, records):
"""
Manually sets the list of records that will be displayed in this tree.
This is a shortcut method for creating a RecordSet with a list of records
and assigning it to the tree.
:param records | [<orb.Table>, ..]
"""
self._searchTerms = ''
if not isinstance(records, RecordSet):
records = RecordSet(records)
self.setRecordSet(records) |
def restoreXml(self, xml):
"""
Restores the logging settings for this widget from XML format.
:param xml | <xml.etree.ElementTree.Element>
"""
self.uiFilterTXT.setText(xml.get('filter', ''))
xlevels = xml.find('levels')
xloggerlevels = xml.find('logger_levels')
xtree = xml.find('tree')
if xlevels is not None and xlevels.text:
self.setActiveLevels(map(int, xlevels.text.split(',')))
if xloggerlevels is not None and xloggerlevels.text:
for key in xloggerlevels.text.split(','):
logger, lvl = key.split(':')
lvl = int(lvl)
self.setLoggerLevel(logger, lvl)
if xtree is not None:
self.uiRecordTREE.restoreXml(xtree) | Restores the logging settings for this widget from XML format.
:param xml | <xml.etree.ElementTree.Element> | Below is the instruction that describes the task:
### Input:
Restores the logging settings for this widget from XML format.
:param xml | <xml.etree.ElementTree.Element>
### Response:
def restoreXml(self, xml):
"""
Restores the logging settings for this widget from XML format.
:param xml | <xml.etree.ElementTree.Element>
"""
self.uiFilterTXT.setText(xml.get('filter', ''))
xlevels = xml.find('levels')
xloggerlevels = xml.find('logger_levels')
xtree = xml.find('tree')
if xlevels is not None and xlevels.text:
self.setActiveLevels(map(int, xlevels.text.split(',')))
if xloggerlevels is not None and xloggerlevels.text:
for key in xloggerlevels.text.split(','):
logger, lvl = key.split(':')
lvl = int(lvl)
self.setLoggerLevel(logger, lvl)
if xtree is not None:
self.uiRecordTREE.restoreXml(xtree) |
def scan(host, port=80, url=None, https=False, timeout=1, max_size=65535):
"""
Scan a network port
Parameters
----------
host : str
Host or ip address to scan
port : int, optional
Port to scan, default=80
url : str, optional
URL to perform get request to on the host and port specified
https : bool, optional
Perform ssl connection on the socket, default=False
timeout : float
Timeout for network operations, default=1
Returns
-------
dict
Result dictionary that contains the following keys:
host - The host or IP address that was scanned
port - The port number that was scanned
state - The state of the port, will be either "open" or "closed"
durations - An ordered dictionary with floating point value of the
time elapsed for each connection operation
Raises
------
ScanFailed - The scan operation failed
"""
starts = OrderedDict()
ends = OrderedDict()
port = int(port)
result = dict(
host=host, port=port, state='closed', durations=OrderedDict()
)
if url:
timeout = 1
result['code'] = None
starts['all'] = starts['dns'] = datetime.datetime.now()
# DNS Lookup
try:
hostip = socket.gethostbyname(host)
result['ip'] = hostip
ends['dns'] = datetime.datetime.now()
except socket.gaierror:
raise ScanFailed('DNS Lookup failed', result=result)
# TCP Connect
starts['connect'] = datetime.datetime.now()
network_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
network_socket.settimeout(timeout)
result_connection = network_socket.connect_ex((hostip, port))
ends['connect'] = datetime.datetime.now()
# SSL
if https:
starts['ssl'] = datetime.datetime.now()
try:
network_socket = ssl.wrap_socket(network_socket)
except socket.timeout:
raise ScanFailed('SSL socket timeout', result=result)
ends['ssl'] = datetime.datetime.now()
# Get request
if result_connection == 0 and url:
starts['request'] = datetime.datetime.now()
network_socket.send(
"GET {0} HTTP/1.0\r\nHost: {1}\r\n\r\n".format(
url, host
).encode('ascii'))
if max_size:
data = network_socket.recv(max_size)
else:
data = network_socket.recv()
result['length'] = len(data)
data = data.decode('ascii', errors='ignore')
result['response'] = data
try:
result['code'] = int(data.split('\n')[0].split()[1])
except IndexError:
pass
ends['request'] = datetime.datetime.now()
network_socket.close()
# Calculate durations
ends['all'] = datetime.datetime.now()
for duration in starts.keys():
if duration in ends.keys():
result['durations'][duration] = ends[duration] - starts[duration]
if result_connection == 0:
result['state'] = 'open'
return result | Scan a network port
Parameters
----------
host : str
Host or ip address to scan
port : int, optional
Port to scan, default=80
url : str, optional
URL to perform get request to on the host and port specified
https : bool, optional
Perform ssl connection on the socket, default=False
timeout : float
Timeout for network operations, default=1
Returns
-------
dict
Result dictionary that contains the following keys:
host - The host or IP address that was scanned
port - The port number that was scanned
state - The state of the port, will be either "open" or "closed"
durations - An ordered dictionary with floating point value of the
time elapsed for each connection operation
Raises
------
ScanFailed - The scan operation failed | Below is the instruction that describes the task:
### Input:
Scan a network port
Parameters
----------
host : str
Host or ip address to scan
port : int, optional
Port to scan, default=80
url : str, optional
URL to perform get request to on the host and port specified
https : bool, optional
Perform ssl connection on the socket, default=False
timeout : float
Timeout for network operations, default=1
Returns
-------
dict
Result dictionary that contains the following keys:
host - The host or IP address that was scanned
port - The port number that was scanned
state - The state of the port, will be either "open" or "closed"
durations - An ordered dictionary with floating point value of the
time elapsed for each connection operation
Raises
------
ScanFailed - The scan operation failed
### Response:
def scan(host, port=80, url=None, https=False, timeout=1, max_size=65535):
"""
Scan a network port
Parameters
----------
host : str
Host or ip address to scan
port : int, optional
Port to scan, default=80
url : str, optional
URL to perform get request to on the host and port specified
https : bool, optional
Perform ssl connection on the socket, default=False
timeout : float
Timeout for network operations, default=1
Returns
-------
dict
Result dictionary that contains the following keys:
host - The host or IP address that was scanned
port - The port number that was scanned
state - The state of the port, will be either "open" or "closed"
durations - An ordered dictionary with floating point value of the
time elapsed for each connection operation
Raises
------
ScanFailed - The scan operation failed
"""
starts = OrderedDict()
ends = OrderedDict()
port = int(port)
result = dict(
host=host, port=port, state='closed', durations=OrderedDict()
)
if url:
timeout = 1
result['code'] = None
starts['all'] = starts['dns'] = datetime.datetime.now()
# DNS Lookup
try:
hostip = socket.gethostbyname(host)
result['ip'] = hostip
ends['dns'] = datetime.datetime.now()
except socket.gaierror:
raise ScanFailed('DNS Lookup failed', result=result)
# TCP Connect
starts['connect'] = datetime.datetime.now()
network_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
network_socket.settimeout(timeout)
result_connection = network_socket.connect_ex((hostip, port))
ends['connect'] = datetime.datetime.now()
# SSL
if https:
starts['ssl'] = datetime.datetime.now()
try:
network_socket = ssl.wrap_socket(network_socket)
except socket.timeout:
raise ScanFailed('SSL socket timeout', result=result)
ends['ssl'] = datetime.datetime.now()
# Get request
if result_connection == 0 and url:
starts['request'] = datetime.datetime.now()
network_socket.send(
"GET {0} HTTP/1.0\r\nHost: {1}\r\n\r\n".format(
url, host
).encode('ascii'))
if max_size:
data = network_socket.recv(max_size)
else:
data = network_socket.recv()
result['length'] = len(data)
data = data.decode('ascii', errors='ignore')
result['response'] = data
try:
result['code'] = int(data.split('\n')[0].split()[1])
except IndexError:
pass
ends['request'] = datetime.datetime.now()
network_socket.close()
# Calculate durations
ends['all'] = datetime.datetime.now()
for duration in starts.keys():
if duration in ends.keys():
result['durations'][duration] = ends[duration] - starts[duration]
if result_connection == 0:
result['state'] = 'open'
return result |
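Example use of the scanner above (the host and the printed values are illustrative):

```python
result = scan('example.com', port=443, url='/', https=True)
print(result['state'], result.get('code'))    # e.g. 'open' 200
for step, delta in result['durations'].items():
    print(step, delta.total_seconds())        # dns/connect/ssl/request timings
```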
def psf_class(self):
"""
creates instance of PSF() class based on knowledge of the observations
For the full possibility of how to create such an instance, see the PSF() class documentation
:return: instance of PSF() class
"""
if self._psf_type == 'GAUSSIAN':
psf_type = "GAUSSIAN"
fwhm = self._seeing
kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm}
elif self._psf_type == 'PIXEL':
if self._psf_model is not None:
kwargs_psf = {'psf_type': "PIXEL", 'kernel_point_source': self._psf_model}
else:
raise ValueError("You need to create the class instance with a psf_model!")
else:
raise ValueError("psf_type %s not supported!" % self._psf_type)
psf_class = PSF(kwargs_psf)
return psf_class | creates instance of PSF() class based on knowledge of the observations
For the full possibility of how to create such an instance, see the PSF() class documentation
:return: instance of PSF() class | Below is the instruction that describes the task:
### Input:
creates instance of PSF() class based on knowledge of the observations
For the full possibility of how to create such an instance, see the PSF() class documentation
:return: instance of PSF() class
### Response:
def psf_class(self):
"""
creates instance of PSF() class based on knowledge of the observations
For the full possibility of how to create such an instance, see the PSF() class documentation
:return: instance of PSF() class
"""
if self._psf_type == 'GAUSSIAN':
psf_type = "GAUSSIAN"
fwhm = self._seeing
kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm}
elif self._psf_type == 'PIXEL':
if self._psf_model is not None:
kwargs_psf = {'psf_type': "PIXEL", 'kernel_point_source': self._psf_model}
else:
raise ValueError("You need to create the class instance with a psf_model!")
else:
raise ValueError("psf_type %s not supported!" % self._psf_type)
psf_class = PSF(kwargs_psf)
return psf_class |
def expect(self,
expect,
searchwindowsize=None,
maxread=None,
timeout=None,
iteration_n=1):
"""Handle child expects, with EOF and TIMEOUT handled
iteration_n - Number of times this expect has been called for the send.
If 1, (the default) then it gets added to the pane of output
(if applicable to this run)
"""
if isinstance(expect, str):
expect = [expect]
if searchwindowsize is not None:
old_searchwindowsize = self.pexpect_child.searchwindowsize
self.pexpect_child.searchwindowsize = searchwindowsize
if maxread is not None:
old_maxread = self.pexpect_child.maxread
self.pexpect_child.maxread = maxread
res = self.pexpect_child.expect(expect + [pexpect.TIMEOUT] + [pexpect.EOF], timeout=timeout)
if searchwindowsize is not None:
self.pexpect_child.searchwindowsize = old_searchwindowsize
if maxread is not None:
self.pexpect_child.maxread = old_maxread
# Add to session lines only if pane manager exists.
if shutit_global.shutit_global_object.pane_manager and iteration_n == 1:
time_seen = time.time()
lines_to_add = []
if isinstance(self.pexpect_child.before, (str,unicode)):
for line_str in self.pexpect_child.before.split('\n'):
lines_to_add.append(line_str)
if isinstance(self.pexpect_child.after, (str,unicode)):
for line_str in self.pexpect_child.after.split('\n'):
lines_to_add.append(line_str)
# If first or last line is empty, remove it.
#if len(lines_to_add) > 0 and lines_to_add[1] == '':
# lines_to_add = lines_to_add[1:]
#if len(lines_to_add) > 0 and lines_to_add[-1] == '':
# lines_to_add = lines_to_add[:-1]
for line in lines_to_add:
self.session_output_lines.append(SessionPaneLine(line_str=line, time_seen=time_seen, line_type='output'))
return res | Handle child expects, with EOF and TIMEOUT handled
iteration_n - Number of times this expect has been called for the send.
If 1, (the default) then it gets added to the pane of output
(if applicable to this run) | Below is the instruction that describes the task:
### Input:
Handle child expects, with EOF and TIMEOUT handled
iteration_n - Number of times this expect has been called for the send.
If 1, (the default) then it gets added to the pane of output
(if applicable to this run)
### Response:
def expect(self,
expect,
searchwindowsize=None,
maxread=None,
timeout=None,
iteration_n=1):
"""Handle child expects, with EOF and TIMEOUT handled
iteration_n - Number of times this expect has been called for the send.
If 1, (the default) then it gets added to the pane of output
(if applicable to this run)
"""
if isinstance(expect, str):
expect = [expect]
if searchwindowsize is not None:
old_searchwindowsize = self.pexpect_child.searchwindowsize
self.pexpect_child.searchwindowsize = searchwindowsize
if maxread is not None:
old_maxread = self.pexpect_child.maxread
self.pexpect_child.maxread = maxread
res = self.pexpect_child.expect(expect + [pexpect.TIMEOUT] + [pexpect.EOF], timeout=timeout)
if searchwindowsize is not None:
self.pexpect_child.searchwindowsize = old_searchwindowsize
if maxread is not None:
self.pexpect_child.maxread = old_maxread
# Add to session lines only if pane manager exists.
if shutit_global.shutit_global_object.pane_manager and iteration_n == 1:
time_seen = time.time()
lines_to_add = []
if isinstance(self.pexpect_child.before, (str,unicode)):
for line_str in self.pexpect_child.before.split('\n'):
lines_to_add.append(line_str)
if isinstance(self.pexpect_child.after, (str,unicode)):
for line_str in self.pexpect_child.after.split('\n'):
lines_to_add.append(line_str)
# If first or last line is empty, remove it.
#if len(lines_to_add) > 0 and lines_to_add[1] == '':
# lines_to_add = lines_to_add[1:]
#if len(lines_to_add) > 0 and lines_to_add[-1] == '':
# lines_to_add = lines_to_add[:-1]
for line in lines_to_add:
self.session_output_lines.append(SessionPaneLine(line_str=line, time_seen=time_seen, line_type='output'))
return res |
def play_Bar(self, bar):
"""Convert a Bar object to MIDI events and write them to the
track_data."""
self.set_deltatime(self.delay)
self.delay = 0
self.set_meter(bar.meter)
self.set_deltatime(0)
self.set_key(bar.key)
for x in bar:
tick = int(round((1.0 / x[1]) * 288))
if x[2] is None or len(x[2]) == 0:
self.delay += tick
else:
self.set_deltatime(self.delay)
self.delay = 0
if hasattr(x[2], 'bpm'):
self.set_deltatime(0)
self.set_tempo(x[2].bpm)
self.play_NoteContainer(x[2])
self.set_deltatime(self.int_to_varbyte(tick))
self.stop_NoteContainer(x[2]) | Convert a Bar object to MIDI events and write them to the
track_data. | Below is the instruction that describes the task:
### Input:
Convert a Bar object to MIDI events and write them to the
track_data.
### Response:
def play_Bar(self, bar):
"""Convert a Bar object to MIDI events and write them to the
track_data."""
self.set_deltatime(self.delay)
self.delay = 0
self.set_meter(bar.meter)
self.set_deltatime(0)
self.set_key(bar.key)
for x in bar:
tick = int(round((1.0 / x[1]) * 288))
if x[2] is None or len(x[2]) == 0:
self.delay += tick
else:
self.set_deltatime(self.delay)
self.delay = 0
if hasattr(x[2], 'bpm'):
self.set_deltatime(0)
self.set_tempo(x[2].bpm)
self.play_NoteContainer(x[2])
self.set_deltatime(self.int_to_varbyte(tick))
self.stop_NoteContainer(x[2]) |
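A usage sketch, assuming this writer sits on top of mingus containers (`writer` is a hypothetical instance of the class the method belongs to):

```python
from mingus.containers import Bar

bar = Bar()               # defaults to C major, 4/4
bar.place_notes('C-4', 4)
bar.place_notes('E-4', 4)
bar.place_notes('G-4', 2)
writer.play_Bar(bar)      # appends the bar's MIDI events to track_data
```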
def success(item):
'''Successful finish'''
try:
# mv to done
trg_queue = item.queue
os.rename(fsq_path.item(trg_queue, item.id, host=item.host),
os.path.join(fsq_path.done(trg_queue, host=item.host),
item.id))
except AttributeError, e:
# DuckType TypeError'ing
raise TypeError(u'item must be an FSQWorkItem, not:'\
u' {0}'.format(item.__class__.__name__))
except (OSError, IOError, ), e:
raise FSQDoneError(e.errno, u'cannot mv item to done: {0}:'\
u' {1}'.format(item.id, wrap_io_os_err(e))) | Successful finish | Below is the instruction that describes the task:
### Input:
Successful finish
### Response:
def success(item):
'''Successful finish'''
try:
# mv to done
trg_queue = item.queue
os.rename(fsq_path.item(trg_queue, item.id, host=item.host),
os.path.join(fsq_path.done(trg_queue, host=item.host),
item.id))
except AttributeError, e:
# DuckType TypeError'ing
raise TypeError(u'item must be an FSQWorkItem, not:'\
u' {0}'.format(item.__class__.__name__))
except (OSError, IOError, ), e:
raise FSQDoneError(e.errno, u'cannot mv item to done: {0}:'\
u' {1}'.format(item.id, wrap_io_os_err(e))) |
def get_export_configuration(self, config_id):
"""
Retrieve the ExportConfiguration with the given ID
:param string config_id:
ID for which to search
:return:
a :class:`meteorpi_model.ExportConfiguration` or None, if no match was found.
"""
sql = (
'SELECT uid, exportConfigId, exportType, searchString, targetURL, '
'targetUser, targetPassword, exportName, description, active '
'FROM archive_exportConfig WHERE exportConfigId = %s')
return first_from_generator(
self.generators.export_configuration_generator(sql=sql, sql_args=(config_id,))) | Retrieve the ExportConfiguration with the given ID
:param string config_id:
ID for which to search
:return:
a :class:`meteorpi_model.ExportConfiguration` or None, if no match was found. | Below is the instruction that describes the task:
### Input:
Retrieve the ExportConfiguration with the given ID
:param string config_id:
ID for which to search
:return:
a :class:`meteorpi_model.ExportConfiguration` or None, if no match was found.
### Response:
def get_export_configuration(self, config_id):
"""
Retrieve the ExportConfiguration with the given ID
:param string config_id:
ID for which to search
:return:
a :class:`meteorpi_model.ExportConfiguration` or None, if no match was found.
"""
sql = (
'SELECT uid, exportConfigId, exportType, searchString, targetURL, '
'targetUser, targetPassword, exportName, description, active '
'FROM archive_exportConfig WHERE exportConfigId = %s')
return first_from_generator(
self.generators.export_configuration_generator(sql=sql, sql_args=(config_id,))) |
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies)
self._start_time = timezone.utcnow() | Launch the process and start processing the DAG. | Below is the instruction that describes the task:
### Input:
Launch the process and start processing the DAG.
### Response:
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies)
self._start_time = timezone.utcnow() |
def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()} | Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset | Below is the instruction that describes the task:
### Input:
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
### Response:
def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()} |
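Typical use; on first call this downloads the corpus to `~/stanford_sentiment_treebank/`:

```python
dataset = load_sst()
for split in ('train', 'dev', 'test'):
    print(split, len(dataset[split]))  # number of LabeledTrees per split
```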
def _render_short_instance(self, instance):
"""
For those very short versions of resources, we have this.
:param instance: The instance to render
"""
check_permission(instance, None, Permissions.VIEW)
return {'type': instance.__jsonapi_type__, 'id': instance.id} | For those very short versions of resources, we have this.
:param instance: The instance to render | Below is the instruction that describes the task:
### Input:
For those very short versions of resources, we have this.
:param instance: The instance to render
### Response:
def _render_short_instance(self, instance):
"""
For those very short versions of resources, we have this.
:param instance: The instance to render
"""
check_permission(instance, None, Permissions.VIEW)
return {'type': instance.__jsonapi_type__, 'id': instance.id} |
def from_config(config):
"""Return a Recruiter instance based on the configuration.
Default is HotAirRecruiter in debug mode (unless we're using
the bot recruiter, which can be used in debug mode)
and the MTurkRecruiter in other modes.
"""
debug_mode = config.get("mode") == "debug"
name = config.get("recruiter", None)
recruiter = None
# Special case 1: Don't use a configured recruiter in replay mode
if config.get("replay"):
return HotAirRecruiter()
if name is not None:
recruiter = by_name(name)
# Special case 2: may run BotRecruiter or MultiRecruiter in any mode
# (debug or not), so it trumps everything else:
if isinstance(recruiter, (BotRecruiter, MultiRecruiter)):
return recruiter
# Special case 3: if we're not using bots and we're in debug mode,
# ignore any configured recruiter:
if debug_mode:
return HotAirRecruiter()
# Configured recruiter:
if recruiter is not None:
return recruiter
if name and recruiter is None:
raise NotImplementedError("No such recruiter {}".format(name))
# Default if we're not in debug mode:
return MTurkRecruiter() | Return a Recruiter instance based on the configuration.
Default is HotAirRecruiter in debug mode (unless we're using
the bot recruiter, which can be used in debug mode)
and the MTurkRecruiter in other modes. | Below is the instruction that describes the task:
### Input:
Return a Recruiter instance based on the configuration.
Default is HotAirRecruiter in debug mode (unless we're using
the bot recruiter, which can be used in debug mode)
and the MTurkRecruiter in other modes.
### Response:
def from_config(config):
"""Return a Recruiter instance based on the configuration.
Default is HotAirRecruiter in debug mode (unless we're using
the bot recruiter, which can be used in debug mode)
and the MTurkRecruiter in other modes.
"""
debug_mode = config.get("mode") == "debug"
name = config.get("recruiter", None)
recruiter = None
# Special case 1: Don't use a configured recruiter in replay mode
if config.get("replay"):
return HotAirRecruiter()
if name is not None:
recruiter = by_name(name)
# Special case 2: may run BotRecruiter or MultiRecruiter in any mode
# (debug or not), so it trumps everything else:
if isinstance(recruiter, (BotRecruiter, MultiRecruiter)):
return recruiter
# Special case 3: if we're not using bots and we're in debug mode,
# ignore any configured recruiter:
if debug_mode:
return HotAirRecruiter()
# Configured recruiter:
if recruiter is not None:
return recruiter
if name and recruiter is None:
raise NotImplementedError("No such recruiter {}".format(name))
# Default if we're not in debug mode:
return MTurkRecruiter() |
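A sketch of the dispatch above; a plain dict satisfies the config interface used here (`.get` with an optional default), and the recruiter name is hypothetical:

```python
recruiter = from_config({'mode': 'debug'})  # -> HotAirRecruiter()
recruiter = from_config({'mode': 'live', 'recruiter': 'bots'})  # looked up via by_name
```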
def _probe_positions(probe, group):
"""Return the positions of a probe channel group."""
positions = probe['channel_groups'][group]['geometry']
channels = _probe_channels(probe, group)
return np.array([positions[channel] for channel in channels]) | Return the positions of a probe channel group. | Below is the instruction that describes the task:
### Input:
Return the positions of a probe channel group.
### Response:
def _probe_positions(probe, group):
"""Return the positions of a probe channel group."""
positions = probe['channel_groups'][group]['geometry']
channels = _probe_channels(probe, group)
return np.array([positions[channel] for channel in channels]) |
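For instance, with a two-channel group (this assumes the companion `_probe_channels` helper returns the group's `channels` list):

```python
probe = {'channel_groups': {0: {
    'channels': [0, 1],
    'geometry': {0: (0.0, 0.0), 1: (10.0, 0.0)},
}}}
print(_probe_positions(probe, 0))  # [[ 0.  0.] [10.  0.]]
```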
def charge(
self,
amount,
currency=None,
application_fee=None,
capture=None,
description=None,
destination=None,
metadata=None,
shipping=None,
source=None,
statement_descriptor=None,
idempotency_key=None,
):
"""
Creates a charge for this customer.
Parameters not implemented:
* **receipt_email** - Since this is a charge on a customer, the customer's email address is used.
:param amount: The amount to charge.
:type amount: Decimal. Precision is 2; anything more will be ignored.
:param currency: 3-letter ISO code for currency
:type currency: string
:param application_fee: A fee that will be applied to the charge and transferred to the platform owner's
account.
:type application_fee: Decimal. Precision is 2; anything more will be ignored.
:param capture: Whether or not to immediately capture the charge. When false, the charge issues an
authorization (or pre-authorization), and will need to be captured later. Uncaptured
charges expire in 7 days. Default is True
:type capture: bool
:param description: An arbitrary string.
:type description: string
:param destination: An account to make the charge on behalf of.
:type destination: Account
:param metadata: A set of key/value pairs useful for storing additional information.
:type metadata: dict
:param shipping: Shipping information for the charge.
:type shipping: dict
:param source: The source to use for this charge. Must be a source attributed to this customer. If None,
the customer's default source is used. Can be either the id of the source or the source object
itself.
:type source: string, Source
:param statement_descriptor: An arbitrary string to be displayed on the customer's credit card statement.
:type statement_descriptor: string
"""
if not isinstance(amount, decimal.Decimal):
raise ValueError("You must supply a decimal value representing dollars.")
# TODO: better default detection (should charge in customer default)
currency = currency or "usd"
# Convert Source to id
if source and isinstance(source, StripeModel):
source = source.id
stripe_charge = Charge._api_create(
amount=int(amount * 100), # Convert dollars into cents
currency=currency,
application_fee=int(application_fee * 100)
if application_fee
else None, # Convert dollars into cents
capture=capture,
description=description,
destination=destination,
metadata=metadata,
shipping=shipping,
customer=self.id,
source=source,
statement_descriptor=statement_descriptor,
idempotency_key=idempotency_key,
)
return Charge.sync_from_stripe_data(stripe_charge) | Creates a charge for this customer.
Parameters not implemented:
* **receipt_email** - Since this is a charge on a customer, the customer's email address is used.
:param amount: The amount to charge.
:type amount: Decimal. Precision is 2; anything more will be ignored.
:param currency: 3-letter ISO code for currency
:type currency: string
:param application_fee: A fee that will be applied to the charge and transferred to the platform owner's
account.
:type application_fee: Decimal. Precision is 2; anything more will be ignored.
:param capture: Whether or not to immediately capture the charge. When false, the charge issues an
authorization (or pre-authorization), and will need to be captured later. Uncaptured
charges expire in 7 days. Default is True
:type capture: bool
:param description: An arbitrary string.
:type description: string
:param destination: An account to make the charge on behalf of.
:type destination: Account
:param metadata: A set of key/value pairs useful for storing additional information.
:type metadata: dict
:param shipping: Shipping information for the charge.
:type shipping: dict
:param source: The source to use for this charge. Must be a source attributed to this customer. If None,
the customer's default source is used. Can be either the id of the source or the source object
itself.
:type source: string, Source
:param statement_descriptor: An arbitrary string to be displayed on the customer's credit card statement.
:type statement_descriptor: string | Below is the instruction that describes the task:
### Input:
Creates a charge for this customer.
Parameters not implemented:
* **receipt_email** - Since this is a charge on a customer, the customer's email address is used.
:param amount: The amount to charge.
:type amount: Decimal. Precision is 2; anything more will be ignored.
:param currency: 3-letter ISO code for currency
:type currency: string
:param application_fee: A fee that will be applied to the charge and transferred to the platform owner's
account.
:type application_fee: Decimal. Precision is 2; anything more will be ignored.
:param capture: Whether or not to immediately capture the charge. When false, the charge issues an
authorization (or pre-authorization), and will need to be captured later. Uncaptured
charges expire in 7 days. Default is True
:type capture: bool
:param description: An arbitrary string.
:type description: string
:param destination: An account to make the charge on behalf of.
:type destination: Account
:param metadata: A set of key/value pairs useful for storing additional information.
:type metadata: dict
:param shipping: Shipping information for the charge.
:type shipping: dict
:param source: The source to use for this charge. Must be a source attributed to this customer. If None,
the customer's default source is used. Can be either the id of the source or the source object
itself.
:type source: string, Source
:param statement_descriptor: An arbitrary string to be displayed on the customer's credit card statement.
:type statement_descriptor: string
### Response:
def charge(
self,
amount,
currency=None,
application_fee=None,
capture=None,
description=None,
destination=None,
metadata=None,
shipping=None,
source=None,
statement_descriptor=None,
idempotency_key=None,
):
"""
Creates a charge for this customer.
Parameters not implemented:
* **receipt_email** - Since this is a charge on a customer, the customer's email address is used.
:param amount: The amount to charge.
:type amount: Decimal. Precision is 2; anything more will be ignored.
:param currency: 3-letter ISO code for currency
:type currency: string
:param application_fee: A fee that will be applied to the charge and transferred to the platform owner's
account.
:type application_fee: Decimal. Precision is 2; anything more will be ignored.
:param capture: Whether or not to immediately capture the charge. When false, the charge issues an
authorization (or pre-authorization), and will need to be captured later. Uncaptured
charges expire in 7 days. Default is True
:type capture: bool
:param description: An arbitrary string.
:type description: string
:param destination: An account to make the charge on behalf of.
:type destination: Account
:param metadata: A set of key/value pairs useful for storing additional information.
:type metadata: dict
:param shipping: Shipping information for the charge.
:type shipping: dict
:param source: The source to use for this charge. Must be a source attributed to this customer. If None,
the customer's default source is used. Can be either the id of the source or the source object
itself.
:type source: string, Source
:param statement_descriptor: An arbitrary string to be displayed on the customer's credit card statement.
:type statement_descriptor: string
"""
if not isinstance(amount, decimal.Decimal):
raise ValueError("You must supply a decimal value representing dollars.")
# TODO: better default detection (should charge in customer default)
currency = currency or "usd"
# Convert Source to id
if source and isinstance(source, StripeModel):
source = source.id
stripe_charge = Charge._api_create(
amount=int(amount * 100), # Convert dollars into cents
currency=currency,
application_fee=int(application_fee * 100)
if application_fee
else None, # Convert dollars into cents
capture=capture,
description=description,
destination=destination,
metadata=metadata,
shipping=shipping,
customer=self.id,
source=source,
statement_descriptor=statement_descriptor,
idempotency_key=idempotency_key,
)
return Charge.sync_from_stripe_data(stripe_charge) |
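A quick, self-contained check of the amount handling in the entry above; `dollars_to_cents` is a hypothetical helper that mirrors the validation and conversion inside `charge`, not part of dj-stripe.

```python
import decimal

def dollars_to_cents(amount):
    # Hypothetical helper mirroring the validation and conversion in charge().
    if not isinstance(amount, decimal.Decimal):
        raise ValueError("You must supply a decimal value representing dollars.")
    return int(amount * 100)

print(dollars_to_cents(decimal.Decimal("19.99")))   # 1999
print(dollars_to_cents(decimal.Decimal("19.999")))  # 1999 -- extra precision ignored
```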
def attr(self, *args):
'''Add the specific attribute to the attribute dictionary
with key ``name`` and value ``value`` and return ``self``.'''
attr = self._attr
if not args:
return attr or {}
result, adding = self._attrdata('attr', *args)
if adding:
for key, value in result.items():
if DATARE.match(key):
self.data(key[5:], value)
else:
if attr is None:
self._extra['attr'] = attr = {}
attr[key] = value
result = self
return result | Add the specific attribute to the attribute dictionary
with key ``name`` and value ``value`` and return ``self``. | Below is the instruction that describes the task:
### Input:
Add the specific attribute to the attribute dictionary
with key ``name`` and value ``value`` and return ``self``.
### Response:
def attr(self, *args):
'''Add the specific attribute to the attribute dictionary
with key ``name`` and value ``value`` and return ``self``.'''
attr = self._attr
if not args:
return attr or {}
result, adding = self._attrdata('attr', *args)
if adding:
for key, value in result.items():
if DATARE.match(key):
self.data(key[5:], value)
else:
if attr is None:
self._extra['attr'] = attr = {}
attr[key] = value
result = self
return result |
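A minimal, runnable sketch of the fluent getter/setter contract that `attr` implements above. The `Node` class is illustrative only; the real method also routes `data-*` keys through `self.data()` and supports bulk updates via `_attrdata`.

```python
class Node:
    """Illustrative sketch of the attr() contract: no args -> read, args -> write + chain."""

    def __init__(self):
        self._attr = None

    def attr(self, *args):
        if not args:
            return self._attr or {}
        key, value = args
        if self._attr is None:  # lazily create the dict, as the original does
            self._attr = {}
        self._attr[key] = value
        return self  # returning self is what enables chaining

node = Node().attr('id', 'main').attr('class', 'box')
print(node.attr())  # {'id': 'main', 'class': 'box'}
```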
def has_param(self, param):
""" .. todo:: has_param docstring
"""
# Imports
from ..error import RepoError
# Try to get the param; pass along all errors, except 'data' error
# from RepoError
retval = True
try:
self.get_param(param)
except RepoError as RErr:
if RErr.tc == RErr.DATA:
retval = False
## end if
## end try
# Should be good to return
return retval | .. todo:: has_param docstring | Below is the instruction that describes the task:
### Input:
.. todo:: has_param docstring
### Response:
def has_param(self, param):
""" .. todo:: has_param docstring
"""
# Imports
from ..error import RepoError
# Try to get the param; pass along all errors, except 'data' error
# from RepoError
retval = True
try:
self.get_param(param)
except RepoError as RErr:
if RErr.tc == RErr.DATA:
retval = False
## end if
## end try
# Should be good to return
return retval |
def process_data(self, new_data):
"""
handles incoming data from the `IrcProtocol` connection.
Main data processing/routing is handled by the _process_line
method, inherited from `ServerConnection`
"""
self.buffer.feed(new_data)
# process each non-empty line after logging all lines
for line in self.buffer:
log.debug("FROM SERVER: %s", line)
if not line:
continue
self._process_line(line) | handles incoming data from the `IrcProtocol` connection.
Main data processing/routing is handled by the _process_line
method, inherited from `ServerConnection` | Below is the instruction that describes the task:
### Input:
handles incoming data from the `IrcProtocol` connection.
Main data processing/routing is handled by the _process_line
method, inherited from `ServerConnection`
### Response:
def process_data(self, new_data):
"""
handles incoming data from the `IrcProtocol` connection.
Main data processing/routing is handled by the _process_line
method, inherited from `ServerConnection`
"""
self.buffer.feed(new_data)
# process each non-empty line after logging all lines
for line in self.buffer:
log.debug("FROM SERVER: %s", line)
if not line:
continue
self._process_line(line) |
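The method above relies on a buffer object with a `feed()`/iterate protocol. A minimal stand-in (illustrative, not the real `irc` buffer class, which also handles encoding fallbacks) shows the idea:

```python
class LineBuffer:
    """Minimal stand-in for the feed/iterate buffer used above."""

    def __init__(self):
        self._data = b""

    def feed(self, new_bytes):
        self._data += new_bytes

    def __iter__(self):
        # Yield complete lines; keep any trailing partial line buffered.
        while b"\r\n" in self._data:
            line, self._data = self._data.split(b"\r\n", 1)
            yield line.decode("utf-8", errors="replace")

buf = LineBuffer()
buf.feed(b"PING :server\r\n:nick PRIVMSG #chan :hi\r\npartial")
print(list(buf))   # ['PING :server', ':nick PRIVMSG #chan :hi']
print(buf._data)   # b'partial' stays queued until more data arrives
```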
def _compare_blocks(block_a, block_b):
"""Compare two blocks of characters
Compares two blocks of characters of the form returned by either
the :any:`_pop_digits` or :any:`_pop_letters` function. Blocks
should be character lists containing only digits or only letters.
Both blocks should contain the same character type (digits or
letters).
The method of comparison mirrors the method used by RPM. If the
blocks are digit blocks, any leading zeros are trimmed, and
whichever block is longer is assumed to be larger. If the resultant
blocks are the same length, or if the blocks are non-numeric, they
are checked for string equality and considered equal if the string
equality comparison returns True. If not, whichever evaluates as
greater than the other (again in string comparison) is assumed to be
larger.
:param list block_a: an all numeric or all alphabetic character
list
:param list block_b: an all numeric or all alphabetic character
list. Alphabetic or numeric character should match ``block_a``
:return: 1 (if ``a`` is newer), 0 (if versions are equal) or
-1 (if ``b`` is newer)
:rtype: int
"""
logger.debug('_compare_blocks(%s, %s)', block_a, block_b)
if block_a[0].isdigit():
_trim_zeros(block_a, block_b)
if len(block_a) != len(block_b):
logger.debug('block lengths are not equal')
return a_newer if len(block_a) > len(block_b) else b_newer
if block_a == block_b:
logger.debug('blocks are equal')
return a_eq_b
else:
logger.debug('blocks are not equal')
return a_newer if block_a > block_b else b_newer | Compare two blocks of characters
Compares two blocks of characters of the form returned by either
the :any:`_pop_digits` or :any:`_pop_letters` function. Blocks
should be character lists containing only digits or only letters.
Both blocks should contain the same character type (digits or
letters).
The method of comparison mirrors the method used by RPM. If the
blocks are digit blocks, any leading zeros are trimmed, and
whichever block is longer is assumed to be larger. If the resultant
blocks are the same length, or if the blocks are non-numeric, they
are checked for string equality and considered equal if the string
equality comparison returns True. If not, whichever evaluates as
greater than the other (again in string comparison) is assumed to be
larger.
:param list block_a: an all numeric or all alphabetic character
list
:param list block_b: an all numeric or all alphabetic character
list. Alphabetic or numeric character should match ``block_a``
:return: 1 (if ``a`` is newer), 0 (if versions are equal) or
-1 (if ``b`` is newer)
:rtype: int | Below is the instruction that describes the task:
### Input:
Compare two blocks of characters
Compares two blocks of characters of the form returned by either
the :any:`_pop_digits` or :any:`_pop_letters` function. Blocks
should be character lists containing only digits or only letters.
Both blocks should contain the same character type (digits or
letters).
The method of comparison mirrors the method used by RPM. If the
blocks are digit blocks, any leading zeros are trimmed, and
whichever block is longer is assumed to be larger. If the resultant
blocks are the same length, or if the blocks are non-numeric, they
are checked for string equality and considered equal if the string
equality comparison returns True. If not, whichever evaluates as
greater than the other (again in string comparison) is assumed to be
larger.
:param list block_a: an all numeric or all alphabetic character
list
:param list block_b: an all numeric or all alphabetic character
list. Alphabetic or numeric character should match ``block_a``
:return: 1 (if ``a`` is newer), 0 (if versions are equal) or
-1 (if ``b`` is newer)
:rtype: int
### Response:
def _compare_blocks(block_a, block_b):
"""Compare two blocks of characters
Compares two blocks of characters of the form returned by either
the :any:`_pop_digits` or :any:`_pop_letters` function. Blocks
should be character lists containing only digits or only letters.
Both blocks should contain the same character type (digits or
letters).
The method of comparison mirrors the method used by RPM. If the
blocks are digit blocks, any leading zeros are trimmed, and
whichever block is longer is assumed to be larger. If the resultant
blocks are the same length, or if the blocks are non-numeric, they
are checked for string equality and considered equal if the string
equality comparison returns True. If not, whichever evaluates as
greater than the other (again in string comparison) is assumed to be
larger.
:param list block_a: an all numeric or all alphabetic character
list
:param list block_b: an all numeric or all alphabetic character
list. Alphabetic or numeric character should match ``block_a``
:return: 1 (if ``a`` is newer), 0 (if versions are equal) or
-1 (if ``b`` is newer)
:rtype: int
"""
logger.debug('_compare_blocks(%s, %s)', block_a, block_b)
if block_a[0].isdigit():
_trim_zeros(block_a, block_b)
if len(block_a) != len(block_b):
logger.debug('block lengths are not equal')
return a_newer if len(block_a) > len(block_b) else b_newer
if block_a == block_b:
logger.debug('blocks are equal')
return a_eq_b
else:
logger.debug('blocks are not equal')
return a_newer if block_a > block_b else b_newer |
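A self-contained demo of the comparison above. `a_newer`, `a_eq_b`, `b_newer`, and `_trim_zeros` live at module level in the original; the definitions below are plausible stand-ins so the snippet runs on its own, assuming `_compare_blocks` from the entry is in scope.

```python
import logging

logger = logging.getLogger(__name__)
a_newer, a_eq_b, b_newer = 1, 0, -1  # assumed module-level constants

def _trim_zeros(*blocks):
    # Plausible stand-in: strip leading zeros from each digit block in place.
    for block in blocks:
        while len(block) > 1 and block[0] == '0':
            del block[0]

# With _compare_blocks from the entry above in scope:
print(_compare_blocks(list('10'), list('9')))     # 1  -> longer digit block wins
print(_compare_blocks(list('007'), list('7')))    # 0  -> equal once zeros are trimmed
print(_compare_blocks(list('abc'), list('abd')))  # -1 -> plain string comparison
```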
def convert_coords(self):
"""
Process list of coordinates
This mainly searches for tuples of coordinates in the coordinate list and
creates a SkyCoord or PixCoord object from them if appropriate for a
given region type. This again involves some coordinate transformation,
so this step could be moved to the parsing process
"""
if self.coordsys in ['image', 'physical']:
coords = self._convert_pix_coords()
else:
coords = self._convert_sky_coords()
if self.region_type == 'line':
coords = [coords[0][0], coords[0][1]]
if self.region_type == 'text':
coords.append(self.meta['text'])
return coords | Process list of coordinates
This mainly searches for tuples of coordinates in the coordinate list and
creates a SkyCoord or PixCoord object from them if appropriate for a
given region type. This again involves some coordinate transformation,
so this step could be moved to the parsing process | Below is the instruction that describes the task:
### Input:
Process list of coordinates
This mainly searches for tuples of coordinates in the coordinate list and
creates a SkyCoord or PixCoord object from them if appropriate for a
given region type. This again involves some coordinate transformation,
so this step could be moved to the parsing process
### Response:
def convert_coords(self):
"""
Process list of coordinates
This mainly searches for tuples of coordinates in the coordinate list and
creates a SkyCoord or PixCoord object from them if appropriate for a
given region type. This again involves some coordinate transformation,
so this step could be moved to the parsing process
"""
if self.coordsys in ['image', 'physical']:
coords = self._convert_pix_coords()
else:
coords = self._convert_sky_coords()
if self.region_type == 'line':
coords = [coords[0][0], coords[0][1]]
if self.region_type == 'text':
coords.append(self.meta['text'])
return coords |
def release(self):
""" Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources
"""
self._logger.debug("Releasing: %r", self)
# Discard self from set of outstanding instances
if self._addedToInstanceSet:
try:
self._clsOutstandingInstances.remove(self)
except:
self._logger.exception(
"Failed to remove self from _clsOutstandingInstances: %r;", self)
raise
self._releaser(dbConn=self.dbConn, cursor=self.cursor)
self.__class__._clsNumOutstanding -= 1
assert self._clsNumOutstanding >= 0, \
"_clsNumOutstanding=%r" % (self._clsNumOutstanding,)
self._releaser = None
self.cursor = None
self.dbConn = None
self._creationTracebackString = None
self._addedToInstanceSet = False
self._logger = None
return | Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources | Below is the instruction that describes the task:
### Input:
Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources
### Response:
def release(self):
""" Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources
"""
self._logger.debug("Releasing: %r", self)
# Discard self from set of outstanding instances
if self._addedToInstanceSet:
try:
self._clsOutstandingInstances.remove(self)
except:
self._logger.exception(
"Failed to remove self from _clsOutstandingInstances: %r;", self)
raise
self._releaser(dbConn=self.dbConn, cursor=self.cursor)
self.__class__._clsNumOutstanding -= 1
assert self._clsNumOutstanding >= 0, \
"_clsNumOutstanding=%r" % (self._clsNumOutstanding,)
self._releaser = None
self.cursor = None
self.dbConn = None
self._creationTracebackString = None
self._addedToInstanceSet = False
self._logger = None
return |
def get_string_plus_property_value(value):
# type: (Any) -> Optional[List[str]]
"""
Converts a string or list of strings into a list of strings
:param value: A string or a list of strings
:return: A list of strings or None
"""
if value:
if isinstance(value, str):
return [value]
if isinstance(value, list):
return value
if isinstance(value, tuple):
return list(value)
return None | Converts a string or list of strings into a list of strings
:param value: A string or a list of strings
:return: A list of strings or None | Below is the instruction that describes the task:
### Input:
Converts a string or list of strings into a list of strings
:param value: A string or a list of strings
:return: A list of strings or None
### Response:
def get_string_plus_property_value(value):
# type: (Any) -> Optional[List[str]]
"""
Converts a string or list of strings into a list of strings
:param value: A string or a list of strings
:return: A list of strings or None
"""
if value:
if isinstance(value, str):
return [value]
if isinstance(value, list):
return value
if isinstance(value, tuple):
return list(value)
return None |
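Assuming `get_string_plus_property_value` from the entry above is in scope, each input shape normalises as follows:

```python
# With get_string_plus_property_value from the entry above in scope:
print(get_string_plus_property_value("a"))         # ['a']       string -> 1-element list
print(get_string_plus_property_value(["a", "b"]))  # ['a', 'b']  list passes through
print(get_string_plus_property_value(("a", "b")))  # ['a', 'b']  tuple -> list
print(get_string_plus_property_value(""))          # None        falsy input -> None
print(get_string_plus_property_value(42))          # None        unsupported type -> None
```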
def api_routes(self, callsign: str) -> Tuple[Airport, ...]:
"""Returns the route associated to a callsign."""
from .. import airports
c = requests.get(
f"https://opensky-network.org/api/routes?callsign={callsign}"
)
if c.status_code == 404:
raise ValueError("Unknown callsign")
if c.status_code != 200:
raise ValueError(c.content.decode())
json = c.json()
return tuple(airports[a] for a in json["route"]) | Returns the route associated with a callsign. | Below is the instruction that describes the task:
### Input:
Returns the route associated with a callsign.
### Response:
def api_routes(self, callsign: str) -> Tuple[Airport, ...]:
"""Returns the route associated to a callsign."""
from .. import airports
c = requests.get(
f"https://opensky-network.org/api/routes?callsign={callsign}"
)
if c.status_code == 404:
raise ValueError("Unknown callsign")
if c.status_code != 200:
raise ValueError(c.content.decode())
json = c.json()
return tuple(airports[a] for a in json["route"]) |
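A standalone sketch of the same lookup without the class or the `airports` registry. The endpoint URL and status-code handling come from the code above; the function name and example callsign are illustrative, and the sketch returns raw ICAO codes rather than `Airport` objects.

```python
import requests

def fetch_route(callsign: str):
    # Same endpoint and error handling as api_routes above, minus the
    # Airport lookup: returns the raw ICAO codes from the response.
    r = requests.get(
        f"https://opensky-network.org/api/routes?callsign={callsign}"
    )
    if r.status_code == 404:
        raise ValueError("Unknown callsign")
    if r.status_code != 200:
        raise ValueError(r.content.decode())
    return tuple(r.json()["route"])

# e.g. fetch_route("AFR292") -- the callsign is illustrative; needs network access.
```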
def execute(*args, **kwargs):
"""Executes the sql statement, but does not commit. Returns the cursor to commit
@return: DB and cursor instance following sql execution
"""
# Inspect the call stack for the originating call
args = CoyoteDb.__add_query_comment(args[0])
db = CoyoteDb.__get_db_write_instance(target_database=kwargs.pop('target_database', None))
filtered_kwargs = {k: v for k, v in kwargs.items() if k != 'target_database'}
# Execute the query
cursor = db.cursor()
try:
cursor.execute(*args, **filtered_kwargs)
except OperationalError as e:
raise OperationalError('{} when executing: {}'.format(e.args, args[0]))
return db, cursor | Executes the sql statement, but does not commit. Returns the cursor to commit
@return: DB and cursor instance following sql execution | Below is the instruction that describes the task:
### Input:
Executes the sql statement, but does not commit. Returns the cursor to commit
@return: DB and cursor instance following sql execution
### Response:
def execute(*args, **kwargs):
"""Executes the sql statement, but does not commit. Returns the cursor to commit
@return: DB and cursor instance following sql execution
"""
# Inspect the call stack for the originating call
args = CoyoteDb.__add_query_comment(args[0])
db = CoyoteDb.__get_db_write_instance(target_database=kwargs.pop('target_database', None))
filtered_kwargs = {k: v for k, v in kwargs.items() if k != 'target_database'}
# Execute the query
cursor = db.cursor()
try:
cursor.execute(*args, **filtered_kwargs)
except OperationalError as e:
raise OperationalError('{} when executing: {}'.format(e.args, args[0]))
return db, cursor |
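The contract above — execute without committing and hand back `(db, cursor)` so the caller owns the transaction — is independent of the MySQL plumbing. A minimal sqlite3 sketch of the same idea (names are illustrative, not part of CoyoteDb):

```python
import sqlite3

def execute_no_commit(conn, sql, params=()):
    # Execute but do not commit; the caller owns the transaction boundary,
    # mirroring the (db, cursor) contract of CoyoteDb.execute above.
    cursor = conn.cursor()
    cursor.execute(sql, params)
    return conn, cursor

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (x INTEGER)")
db, cur = execute_no_commit(conn, "INSERT INTO t VALUES (?)", (1,))
db.commit()  # commit happens here, not inside the helper
print(conn.execute("SELECT x FROM t").fetchall())  # [(1,)]
```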
def run(self):
"""Runs a single experiment task"""
self.__logger.debug("run(): Starting task <%s>", self.__task['taskLabel'])
# Set up the task
# Create our main loop-control iterator
if self.__cmdOptions.privateOptions['testMode']:
numIters = 10
else:
numIters = self.__task['iterationCount']
if numIters >= 0:
iterTracker = iter(xrange(numIters))
else:
iterTracker = iter(itertools.count())
# Initialize periodic activities
periodic = PeriodicActivityMgr(
requestedActivities=self._createPeriodicActivities())
# Reset sequence states in the model, so it starts looking for a new
# sequence
# TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always
# desired in Nupic?
self.__model.resetSequenceStates()
# Have Task Driver perform its initial setup activities, including setup
# callbacks
self.__taskDriver.setup()
# Run it!
while True:
# Check controlling iterator first
try:
next(iterTracker)
except StopIteration:
break
# Read next input record
try:
inputRecord = self.__datasetReader.next()
except StopIteration:
break
# Process input record
result = self.__taskDriver.handleInputRecord(inputRecord=inputRecord)
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
self.__predictionLogger.writeRecord(result)
# Run periodic activities
periodic.tick()
# Dump the experiment metrics at the end of the task
self._getAndEmitExperimentMetrics(final=True)
# Have Task Driver perform its final activities
self.__taskDriver.finalize()
# Reset sequence states in the model, so it starts looking for a new
# sequence
# TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always
# desired in Nupic?
self.__model.resetSequenceStates() | Runs a single experiment task | Below is the instruction that describes the task:
### Input:
Runs a single experiment task
### Response:
def run(self):
"""Runs a single experiment task"""
self.__logger.debug("run(): Starting task <%s>", self.__task['taskLabel'])
# Set up the task
# Create our main loop-control iterator
if self.__cmdOptions.privateOptions['testMode']:
numIters = 10
else:
numIters = self.__task['iterationCount']
if numIters >= 0:
iterTracker = iter(xrange(numIters))
else:
iterTracker = iter(itertools.count())
# Initialize periodic activities
periodic = PeriodicActivityMgr(
requestedActivities=self._createPeriodicActivities())
# Reset sequence states in the model, so it starts looking for a new
# sequence
# TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always
# desired in Nupic?
self.__model.resetSequenceStates()
# Have Task Driver perform its initial setup activities, including setup
# callbacks
self.__taskDriver.setup()
# Run it!
while True:
# Check controlling iterator first
try:
next(iterTracker)
except StopIteration:
break
# Read next input record
try:
inputRecord = self.__datasetReader.next()
except StopIteration:
break
# Process input record
result = self.__taskDriver.handleInputRecord(inputRecord=inputRecord)
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
self.__predictionLogger.writeRecord(result)
# Run periodic activities
periodic.tick()
# Dump the experiment metrics at the end of the task
self._getAndEmitExperimentMetrics(final=True)
# Have Task Driver perform its final activities
self.__taskDriver.finalize()
# Reset sequence states in the model, so it starts looking for a new
# sequence
# TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always
# desired in Nupic?
self.__model.resetSequenceStates() |
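The loop-control idiom at the top of `run` — a finite iterator for a non-negative count, an unbounded one otherwise, consumed purely for its `StopIteration` — isolated as a runnable sketch (the helper name is illustrative):

```python
import itertools

def make_iter_tracker(num_iters):
    # Mirrors the control iterator in run() above: bounded for
    # num_iters >= 0, unbounded otherwise (the loop then ends only
    # when the dataset reader raises StopIteration).
    if num_iters >= 0:
        return iter(range(num_iters))
    return iter(itertools.count())

tracker = make_iter_tracker(3)
count = 0
while True:
    try:
        next(tracker)
    except StopIteration:
        break
    count += 1
print(count)  # 3
```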
def parameter_count(funcsig):
"""Get the number of positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
"""
return sum(
param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
for param in funcsig.parameters.values()
if param.default is Parameter.empty
) | Get the number of required positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters | Below is the instruction that describes the task:
### Input:
Get the number of required positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
### Response:
def parameter_count(funcsig):
"""Get the number of positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
"""
return sum(
param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
for param in funcsig.parameters.values()
if param.default is Parameter.empty
) |
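Assuming `parameter_count` from the entry above is in scope, a quick check confirms it counts only positional parameters without defaults:

```python
from inspect import signature

def example(a, b, c=1, *args, d=2, **kwargs):
    pass

# a and b are the only positional parameters without defaults; c has a
# default, *args/**kwargs are variadic, and d is keyword-only.
print(parameter_count(signature(example)))  # 2
```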
def get_full_lonlats(self):
"""Get the interpolated lons/lats.
"""
if self.lons is not None and self.lats is not None:
return self.lons, self.lats
self.lons, self.lats = self._get_full_lonlats()
self.lons = da.from_delayed(self.lons, dtype=self["EARTH_LOCATIONS"].dtype,
shape=(self.scanlines, self.pixels))
self.lats = da.from_delayed(self.lats, dtype=self["EARTH_LOCATIONS"].dtype,
shape=(self.scanlines, self.pixels))
return self.lons, self.lats | Get the interpolated lons/lats. | Below is the instruction that describes the task:
### Input:
Get the interpolated lons/lats.
### Response:
def get_full_lonlats(self):
"""Get the interpolated lons/lats.
"""
if self.lons is not None and self.lats is not None:
return self.lons, self.lats
self.lons, self.lats = self._get_full_lonlats()
self.lons = da.from_delayed(self.lons, dtype=self["EARTH_LOCATIONS"].dtype,
shape=(self.scanlines, self.pixels))
self.lats = da.from_delayed(self.lats, dtype=self["EARTH_LOCATIONS"].dtype,
shape=(self.scanlines, self.pixels))
return self.lons, self.lats |
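The caching idiom above — compute once, wrap the delayed result into a dask array with a known shape and dtype — sketched standalone. The `_interpolate` placeholder stands in for the real geolocation interpolation and is not part of the reader:

```python
import dask
import dask.array as da
import numpy as np

@dask.delayed
def _interpolate(shape):
    # Placeholder for the real lon/lat interpolation.
    return np.zeros(shape, dtype=np.float64)

class Reader:
    def __init__(self, scanlines, pixels):
        self.scanlines, self.pixels = scanlines, pixels
        self.lons = None

    def get_full_lons(self):
        if self.lons is not None:  # cache hit: reuse the wrapped array
            return self.lons
        shape = (self.scanlines, self.pixels)
        self.lons = da.from_delayed(_interpolate(shape),
                                    shape=shape, dtype=np.float64)
        return self.lons

r = Reader(4, 8)
print(r.get_full_lons().compute().shape)  # (4, 8)
```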
def encode(self):
"""Encode this record into binary, suitable for embedded into an update script.
This function will create multiple records that correspond to the actual
underlying rpcs that SetConfigRecord turns into.
Returns:
bytearray: The binary version of the record that could be parsed via
a call to UpdateRecord.FromBinary()
"""
begin_payload = struct.pack("<H8s", self.config_id, self.target.encode())
start_record = SendErrorCheckingRPCRecord(8, self.BEGIN_CONFIG_RPC, begin_payload, 4)
end_record = SendErrorCheckingRPCRecord(8, self.END_CONFIG_RPC, bytearray(), 4)
push_records = []
for i in range(0, len(self.data), 20):
chunk = self.data[i:i+20]
push_record = SendErrorCheckingRPCRecord(8, self.PUSH_CONFIG_RPC, chunk, 4)
push_records.append(push_record)
out_blob = bytearray()
out_blob += start_record.encode()
for push_record in push_records:
out_blob += push_record.encode()
out_blob += end_record.encode()
return out_blob | Encode this record into binary, suitable for embedding into an update script.
This function will create multiple records that correspond to the actual
underlying rpcs that SetConfigRecord turns into.
Returns:
bytearray: The binary version of the record that could be parsed via
a call to UpdateRecord.FromBinary() | Below is the instruction that describes the task:
### Input:
Encode this record into binary, suitable for embedding into an update script.
This function will create multiple records that correspond to the actual
underlying rpcs that SetConfigRecord turns into.
Returns:
bytearray: The binary version of the record that could be parsed via
a call to UpdateRecord.FromBinary()
### Response:
def encode(self):
"""Encode this record into binary, suitable for embedded into an update script.
This function will create multiple records that correspond to the actual
underlying rpcs that SetConfigRecord turns into.
Returns:
bytearray: The binary version of the record that could be parsed via
a call to UpdateRecord.FromBinary()
"""
begin_payload = struct.pack("<H8s", self.config_id, self.target.encode())
start_record = SendErrorCheckingRPCRecord(8, self.BEGIN_CONFIG_RPC, begin_payload, 4)
end_record = SendErrorCheckingRPCRecord(8, self.END_CONFIG_RPC, bytearray(), 4)
push_records = []
for i in range(0, len(self.data), 20):
chunk = self.data[i:i+20]
push_record = SendErrorCheckingRPCRecord(8, self.PUSH_CONFIG_RPC, chunk, 4)
push_records.append(push_record)
out_blob = bytearray()
out_blob += start_record.encode()
for push_record in push_records:
out_blob += push_record.encode()
out_blob += end_record.encode()
return out_blob |
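The 20-byte chunking and the begin-config header are the parts worth seeing in isolation; the config id and target name below are hypothetical values, not defaults of the updater:

```python
import struct

# Hypothetical values: a 16-bit config id and a target name of up to 8 bytes.
begin_payload = struct.pack("<H8s", 0x2000, b"slot 1")
print(len(begin_payload))  # 10 -- 2-byte id + name padded to 8 bytes with NULs

# Same chunking as the push-record loop in encode() above.
data = bytearray(range(50))
chunks = [data[i:i + 20] for i in range(0, len(data), 20)]
print([len(c) for c in chunks])  # [20, 20, 10] -- the final chunk is the remainder
```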
def matches(self, client, event_data):
"""True if all filters are matching."""
for f in self.filters:
if not f(client, event_data):
return False
return True | True if all filters are matching. | Below is the instruction that describes the task:
### Input:
True if all filters are matching.
### Response:
def matches(self, client, event_data):
"""True if all filters are matching."""
for f in self.filters:
if not f(client, event_data):
return False
return True |
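Filters here are plain callables taking `(client, event_data)`; the short-circuit AND above is equivalent to `all()` over the same calls, as this illustrative sketch shows:

```python
# Illustrative filters in the (client, event_data) shape matches() expects.
filters = [
    lambda client, event: event.get("type") == "message",
    lambda client, event: "text" in event,
]

def matches(client, event_data):
    # Equivalent to the explicit loop above: fail fast on the first miss.
    return all(f(client, event_data) for f in filters)

print(matches(None, {"type": "message", "text": "hi"}))  # True
print(matches(None, {"type": "join"}))                   # False (first filter fails)
```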
def aside_for(cls, view_name):
"""
A decorator to indicate a function is the aside view for the given view_name.
Aside views should have a signature like:
@XBlockAside.aside_for('student_view')
def student_aside(self, block, context=None):
...
return Fragment(...)
"""
# pylint: disable=protected-access
def _decorator(func): # pylint: disable=missing-docstring
if not hasattr(func, '_aside_for'):
func._aside_for = []
func._aside_for.append(view_name) # pylint: disable=protected-access
return func
return _decorator | A decorator to indicate a function is the aside view for the given view_name.
Aside views should have a signature like:
@XBlockAside.aside_for('student_view')
def student_aside(self, block, context=None):
...
return Fragment(...) | Below is the instruction that describes the task:
### Input:
A decorator to indicate a function is the aside view for the given view_name.
Aside views should have a signature like:
@XBlockAside.aside_for('student_view')
def student_aside(self, block, context=None):
...
return Fragment(...)
### Response:
def aside_for(cls, view_name):
"""
A decorator to indicate a function is the aside view for the given view_name.
Aside views should have a signature like:
@XBlockAside.aside_for('student_view')
def student_aside(self, block, context=None):
...
return Fragment(...)
"""
# pylint: disable=protected-access
def _decorator(func): # pylint: disable=missing-docstring
if not hasattr(func, '_aside_for'):
func._aside_for = []
func._aside_for.append(view_name) # pylint: disable=protected-access
return func
return _decorator |
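A standalone copy of the tagging pattern above, with the XBlock context dropped so it runs on its own. Note the decorator annotates the function rather than wrapping it, so stacked uses accumulate view names:

```python
def aside_for(view_name):
    """Standalone copy of the tagging logic above."""
    def _decorator(func):
        if not hasattr(func, '_aside_for'):
            func._aside_for = []
        func._aside_for.append(view_name)
        return func  # the function is annotated, not wrapped
    return _decorator

class MyAside:
    @aside_for('student_view')
    @aside_for('author_view')
    def render_aside(self, block, context=None):
        return "fragment"

# The inner decorator runs first, so its view name lands first in the list.
print(MyAside.render_aside._aside_for)  # ['author_view', 'student_view']
```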
def reset_point_source_cache(self, bool=True):
"""
deletes all the cache in the point source class and saves it from then on
:return:
"""
for imageModel in self._imageModel_list:
imageModel.reset_point_source_cache(bool=bool) | deletes all the cache in the point source class and saves it from then on
:return: | Below is the instruction that describes the task:
### Input:
deletes all the cache in the point source class and saves it from then on
:return:
### Response:
def reset_point_source_cache(self, bool=True):
"""
deletes all the cache in the point source class and saves it from then on
:return:
"""
for imageModel in self._imageModel_list:
imageModel.reset_point_source_cache(bool=bool) |
def initializerepo(self):
""" Fill empty directory with products and make first commit """
try:
os.mkdir(self.repopath)
except OSError:
pass
cmd = self.repo.init(bare=self.bare, shared=self.shared)
if not self.bare:
self.write_testing_data([], [])
self.write_training_data([], [])
self.write_classifier(None)
cmd = self.repo.add('training.pkl')
cmd = self.repo.add('testing.pkl')
cmd = self.repo.add('classifier.pkl')
cmd = self.repo.commit(m='initial commit')
cmd = self.repo.tag('initial')
cmd = self.set_version('initial') | Fill empty directory with products and make first commit | Below is the instruction that describes the task:
### Input:
Fill empty directory with products and make first commit
### Response:
def initializerepo(self):
""" Fill empty directory with products and make first commit """
try:
os.mkdir(self.repopath)
except OSError:
pass
cmd = self.repo.init(bare=self.bare, shared=self.shared)
if not self.bare:
self.write_testing_data([], [])
self.write_training_data([], [])
self.write_classifier(None)
cmd = self.repo.add('training.pkl')
cmd = self.repo.add('testing.pkl')
cmd = self.repo.add('classifier.pkl')
cmd = self.repo.commit(m='initial commit')
cmd = self.repo.tag('initial')
cmd = self.set_version('initial') |
def getsource(obj,is_binary=False):
"""Wrapper around inspect.getsource.
This can be modified by other projects to provide customized source
extraction.
Inputs:
- obj: an object whose source code we will attempt to extract.
Optional inputs:
- is_binary: whether the object is known to come from a binary source.
This implementation will skip returning any output for binary objects, but
custom extractors may know how to meaningfully process them."""
if is_binary:
return None
else:
# get source if obj was decorated with @decorator
if hasattr(obj,"__wrapped__"):
obj = obj.__wrapped__
try:
src = inspect.getsource(obj)
except TypeError:
if hasattr(obj,'__class__'):
src = inspect.getsource(obj.__class__)
return src | Wrapper around inspect.getsource.
This can be modified by other projects to provide customized source
extraction.
Inputs:
- obj: an object whose source code we will attempt to extract.
Optional inputs:
- is_binary: whether the object is known to come from a binary source.
This implementation will skip returning any output for binary objects, but
custom extractors may know how to meaningfully process them. | Below is the instruction that describes the task:
### Input:
Wrapper around inspect.getsource.
This can be modified by other projects to provide customized source
extraction.
Inputs:
- obj: an object whose source code we will attempt to extract.
Optional inputs:
- is_binary: whether the object is known to come from a binary source.
This implementation will skip returning any output for binary objects, but
custom extractors may know how to meaningfully process them.
### Response:
def getsource(obj,is_binary=False):
"""Wrapper around inspect.getsource.
This can be modified by other projects to provide customized source
extraction.
Inputs:
- obj: an object whose source code we will attempt to extract.
Optional inputs:
- is_binary: whether the object is known to come from a binary source.
This implementation will skip returning any output for binary objects, but
custom extractors may know how to meaningfully process them."""
if is_binary:
return None
else:
# get source if obj was decorated with @decorator
if hasattr(obj,"__wrapped__"):
obj = obj.__wrapped__
try:
src = inspect.getsource(obj)
except TypeError:
if hasattr(obj,'__class__'):
src = inspect.getsource(obj.__class__)
return src |
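The `__wrapped__` hop above is what keeps decorated functions introspectable; `functools.wraps` sets that attribute, so a quick check (run as a script, since `inspect.getsource` needs a source file) looks like:

```python
import functools
import inspect

def logged(f):
    @functools.wraps(f)  # sets wrapper.__wrapped__ = f
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper

@logged
def greet(name):
    return "hello " + name

# Without the hop you would get the wrapper's source; with it, the original's.
print(inspect.getsource(greet.__wrapped__))
```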
def client_path_to_os_path(self, client_path):
"""
Converts a client path into the operating system's path by
replacing instances of '/' with os.path.sep.
Note: If the client path contains any instances of
os.path.sep already, they will be replaced with '-'.
"""
if os.path.sep == '/':
return client_path
return client_path.replace(os.path.sep, '-').replace('/', os.path.sep) | Converts a client path into the operating system's path by
replacing instances of '/' with os.path.sep.
Note: If the client path contains any instances of
os.path.sep already, they will be replaced with '-'. | Below is the instruction that describes the task:
### Input:
Converts a client path into the operating system's path by
replacing instances of '/' with os.path.sep.
Note: If the client path contains any instances of
os.path.sep already, they will be replaced with '-'.
### Response:
def client_path_to_os_path(self, client_path):
"""
Converts a client path into the operating system's path by
replacing instances of '/' with os.path.sep.
Note: If the client path contains any instances of
os.path.sep already, they will be replaced with '-'.
"""
if os.path.sep == '/':
return client_path
return client_path.replace(os.path.sep, '-').replace('/', os.path.sep) |
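Because the substitution depends on `os.path.sep`, the easiest way to exercise both branches off-Windows is to inject the separator; this sketch duplicates the logic above with that one change (illustrative only):

```python
def convert(client_path, sep):
    # Same logic as client_path_to_os_path above, with the separator injected.
    if sep == '/':
        return client_path
    return client_path.replace(sep, '-').replace('/', sep)

print(convert('notebooks/sub/nb.ipynb', '/'))   # unchanged on POSIX
print(convert('notebooks/sub/nb.ipynb', '\\'))  # notebooks\sub\nb.ipynb
print(convert('weird\\name/nb.ipynb', '\\'))    # weird-name\nb.ipynb
```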