code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def getWriteSession(self):
''' return unscope session, TODO, make it clear '''
if self.WriteSession is None:
self.WriteSession=sessionmaker(bind=self.engine)
self.writeSession=self.WriteSession()
return self.writeSession | return unscope session, TODO, make it clear | Below is the the instruction that describes the task:
### Input:
return unscope session, TODO, make it clear
### Response:
def getWriteSession(self):
''' return unscope session, TODO, make it clear '''
if self.WriteSession is None:
self.WriteSession=sessionmaker(bind=self.engine)
self.writeSession=self.WriteSession()
return self.writeSession |
def search(self, start_ts, end_ts):
"""Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend.
"""
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.ts <= end_ts or entry.ts >= start_ts:
yield entry.meta_dict | Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend. | Below is the the instruction that describes the task:
### Input:
Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend.
### Response:
def search(self, start_ts, end_ts):
"""Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend.
"""
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.ts <= end_ts or entry.ts >= start_ts:
yield entry.meta_dict |
def submitter(self):
"""
| Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket
"""
if self.api and self.submitter_id:
return self.api._get_user(self.submitter_id) | | Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket | Below is the the instruction that describes the task:
### Input:
| Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket
### Response:
def submitter(self):
"""
| Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket
"""
if self.api and self.submitter_id:
return self.api._get_user(self.submitter_id) |
def set_dtype(self, opt, dtype):
"""Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than 'None'.
Parameters
----------
opt : :class:`cdict.ConstrainedDict` object
Algorithm options
dtype : data-type
Data type for working variables (overridden by 'DataType' option)
"""
# Take no action of self.dtype exists and is not None
if not hasattr(self, 'dtype') or self.dtype is None:
# DataType option overrides explicitly specified data type
if opt['DataType'] is None:
self.dtype = dtype
else:
self.dtype = np.dtype(opt['DataType']) | Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than 'None'.
Parameters
----------
opt : :class:`cdict.ConstrainedDict` object
Algorithm options
dtype : data-type
Data type for working variables (overridden by 'DataType' option) | Below is the the instruction that describes the task:
### Input:
Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than 'None'.
Parameters
----------
opt : :class:`cdict.ConstrainedDict` object
Algorithm options
dtype : data-type
Data type for working variables (overridden by 'DataType' option)
### Response:
def set_dtype(self, opt, dtype):
"""Set the `dtype` attribute. If opt['DataType'] has a value
other than None, it overrides the `dtype` parameter of this
method. No changes are made if the `dtype` attribute already
exists and has a value other than 'None'.
Parameters
----------
opt : :class:`cdict.ConstrainedDict` object
Algorithm options
dtype : data-type
Data type for working variables (overridden by 'DataType' option)
"""
# Take no action of self.dtype exists and is not None
if not hasattr(self, 'dtype') or self.dtype is None:
# DataType option overrides explicitly specified data type
if opt['DataType'] is None:
self.dtype = dtype
else:
self.dtype = np.dtype(opt['DataType']) |
def is_alert_present(self):
"""Tests if an alert is present
@return: True if alert is present, False otherwise
"""
current_frame = None
try:
current_frame = self.driver.current_window_handle
a = self.driver.switch_to_alert()
a.text
except NoAlertPresentException:
# No alert
return False
except UnexpectedAlertPresentException:
# Alert exists
return True
finally:
if current_frame:
self.driver.switch_to_window(current_frame)
return True | Tests if an alert is present
@return: True if alert is present, False otherwise | Below is the the instruction that describes the task:
### Input:
Tests if an alert is present
@return: True if alert is present, False otherwise
### Response:
def is_alert_present(self):
"""Tests if an alert is present
@return: True if alert is present, False otherwise
"""
current_frame = None
try:
current_frame = self.driver.current_window_handle
a = self.driver.switch_to_alert()
a.text
except NoAlertPresentException:
# No alert
return False
except UnexpectedAlertPresentException:
# Alert exists
return True
finally:
if current_frame:
self.driver.switch_to_window(current_frame)
return True |
def write_long(self, number):
""" Writes a long integer to the underlying output file as a 8-byte value. """
buf = pack(self.byte_order + "q", number)
self.write(buf) | Writes a long integer to the underlying output file as a 8-byte value. | Below is the the instruction that describes the task:
### Input:
Writes a long integer to the underlying output file as a 8-byte value.
### Response:
def write_long(self, number):
""" Writes a long integer to the underlying output file as a 8-byte value. """
buf = pack(self.byte_order + "q", number)
self.write(buf) |
def update(self, value=None, force=False, **kwargs):
'Updates the ProgressBar to a new value.'
if self.start_time is None:
self.start()
return self.update(value, force=force, **kwargs)
if value is not None and value is not base.UnknownLength:
if self.max_value is base.UnknownLength:
# Can't compare against unknown lengths so just update
pass
elif self.min_value <= value <= self.max_value: # pragma: no cover
# Correct value, let's accept
pass
elif self.max_error:
raise ValueError(
'Value %s is out of range, should be between %s and %s'
% (value, self.min_value, self.max_value))
else:
self.max_value = value
self.previous_value = self.value
self.value = value
minimum_update_interval = self._MINIMUM_UPDATE_INTERVAL
delta = timeit.default_timer() - self._last_update_timer
if delta < minimum_update_interval and not force:
# Prevent updating too often
return
# Save the updated values for dynamic messages
for key in kwargs:
if key in self.dynamic_messages:
self.dynamic_messages[key] = kwargs[key]
else:
raise TypeError(
'update() got an unexpected keyword ' +
'argument {0!r}'.format(key))
if self._needs_update() or force:
self.updates += 1
ResizableMixin.update(self, value=value)
ProgressBarBase.update(self, value=value)
StdRedirectMixin.update(self, value=value)
# Only flush if something was actually written
self.fd.flush() | Updates the ProgressBar to a new value. | Below is the the instruction that describes the task:
### Input:
Updates the ProgressBar to a new value.
### Response:
def update(self, value=None, force=False, **kwargs):
'Updates the ProgressBar to a new value.'
if self.start_time is None:
self.start()
return self.update(value, force=force, **kwargs)
if value is not None and value is not base.UnknownLength:
if self.max_value is base.UnknownLength:
# Can't compare against unknown lengths so just update
pass
elif self.min_value <= value <= self.max_value: # pragma: no cover
# Correct value, let's accept
pass
elif self.max_error:
raise ValueError(
'Value %s is out of range, should be between %s and %s'
% (value, self.min_value, self.max_value))
else:
self.max_value = value
self.previous_value = self.value
self.value = value
minimum_update_interval = self._MINIMUM_UPDATE_INTERVAL
delta = timeit.default_timer() - self._last_update_timer
if delta < minimum_update_interval and not force:
# Prevent updating too often
return
# Save the updated values for dynamic messages
for key in kwargs:
if key in self.dynamic_messages:
self.dynamic_messages[key] = kwargs[key]
else:
raise TypeError(
'update() got an unexpected keyword ' +
'argument {0!r}'.format(key))
if self._needs_update() or force:
self.updates += 1
ResizableMixin.update(self, value=value)
ProgressBarBase.update(self, value=value)
StdRedirectMixin.update(self, value=value)
# Only flush if something was actually written
self.fd.flush() |
def cls_register(cls, frameset, new_class, init_args, name=None):
""" Register a new FrameSet or FrameSet subclass as a member/attribute
of a class.
Returns the new FrameSet or FrameSet subclass.
Arguments:
frameset : An existing FrameSet, or an iterable of strings.
init_args : A list of properties from the `frameset` to try to use
for initializing the new FrameSet.
new_class : The class type to initialize.
name : New name for the FrameSet, also used as the
classes attribute name.
If the `frameset` object has not `name` attribute,
this argument is required. It must not be empty
when given.
"""
name = name or getattr(frameset, 'name', None)
if name is None:
raise ValueError(
'`name` is needed when the `frameset` has no name attribute.'
)
kwargs = {'name': name}
for initarg in init_args:
kwargs[initarg] = getattr(frameset, initarg, None)
newframeset = new_class(frameset, **kwargs)
# Mark this FrameSet/BarSet as a registered item (not basic/original).
newframeset._registered = True
setattr(cls, name, newframeset)
return newframeset | Register a new FrameSet or FrameSet subclass as a member/attribute
of a class.
Returns the new FrameSet or FrameSet subclass.
Arguments:
frameset : An existing FrameSet, or an iterable of strings.
init_args : A list of properties from the `frameset` to try to use
for initializing the new FrameSet.
new_class : The class type to initialize.
name : New name for the FrameSet, also used as the
classes attribute name.
If the `frameset` object has not `name` attribute,
this argument is required. It must not be empty
when given. | Below is the the instruction that describes the task:
### Input:
Register a new FrameSet or FrameSet subclass as a member/attribute
of a class.
Returns the new FrameSet or FrameSet subclass.
Arguments:
frameset : An existing FrameSet, or an iterable of strings.
init_args : A list of properties from the `frameset` to try to use
for initializing the new FrameSet.
new_class : The class type to initialize.
name : New name for the FrameSet, also used as the
classes attribute name.
If the `frameset` object has not `name` attribute,
this argument is required. It must not be empty
when given.
### Response:
def cls_register(cls, frameset, new_class, init_args, name=None):
""" Register a new FrameSet or FrameSet subclass as a member/attribute
of a class.
Returns the new FrameSet or FrameSet subclass.
Arguments:
frameset : An existing FrameSet, or an iterable of strings.
init_args : A list of properties from the `frameset` to try to use
for initializing the new FrameSet.
new_class : The class type to initialize.
name : New name for the FrameSet, also used as the
classes attribute name.
If the `frameset` object has not `name` attribute,
this argument is required. It must not be empty
when given.
"""
name = name or getattr(frameset, 'name', None)
if name is None:
raise ValueError(
'`name` is needed when the `frameset` has no name attribute.'
)
kwargs = {'name': name}
for initarg in init_args:
kwargs[initarg] = getattr(frameset, initarg, None)
newframeset = new_class(frameset, **kwargs)
# Mark this FrameSet/BarSet as a registered item (not basic/original).
newframeset._registered = True
setattr(cls, name, newframeset)
return newframeset |
def lifted_state_operator(state: TensorProductState, qubits: List[int]):
"""Take a TensorProductState along with a list of qubits and return a matrix
corresponding to the tensored-up representation of the states' density operator form.
Developer note: Quil and the QVM like qubits to be ordered such that qubit 0 is on the right.
Therefore, in ``qubit_adjacent_lifted_gate``, ``lifted_pauli``, and ``lifted_state_operator``,
we build up the lifted matrix by using the *left* kronecker product.
:param state: The state
:param qubits: list of qubits in the order they will be represented in the resultant matrix.
"""
mat = 1.0
for qubit in qubits:
oneq_state = state[qubit]
assert oneq_state.qubit == qubit
state_vector = STATES[oneq_state.label][oneq_state.index][:, np.newaxis]
state_matrix = state_vector @ state_vector.conj().T
mat = np.kron(state_matrix, mat)
return mat | Take a TensorProductState along with a list of qubits and return a matrix
corresponding to the tensored-up representation of the states' density operator form.
Developer note: Quil and the QVM like qubits to be ordered such that qubit 0 is on the right.
Therefore, in ``qubit_adjacent_lifted_gate``, ``lifted_pauli``, and ``lifted_state_operator``,
we build up the lifted matrix by using the *left* kronecker product.
:param state: The state
:param qubits: list of qubits in the order they will be represented in the resultant matrix. | Below is the the instruction that describes the task:
### Input:
Take a TensorProductState along with a list of qubits and return a matrix
corresponding to the tensored-up representation of the states' density operator form.
Developer note: Quil and the QVM like qubits to be ordered such that qubit 0 is on the right.
Therefore, in ``qubit_adjacent_lifted_gate``, ``lifted_pauli``, and ``lifted_state_operator``,
we build up the lifted matrix by using the *left* kronecker product.
:param state: The state
:param qubits: list of qubits in the order they will be represented in the resultant matrix.
### Response:
def lifted_state_operator(state: TensorProductState, qubits: List[int]):
"""Take a TensorProductState along with a list of qubits and return a matrix
corresponding to the tensored-up representation of the states' density operator form.
Developer note: Quil and the QVM like qubits to be ordered such that qubit 0 is on the right.
Therefore, in ``qubit_adjacent_lifted_gate``, ``lifted_pauli``, and ``lifted_state_operator``,
we build up the lifted matrix by using the *left* kronecker product.
:param state: The state
:param qubits: list of qubits in the order they will be represented in the resultant matrix.
"""
mat = 1.0
for qubit in qubits:
oneq_state = state[qubit]
assert oneq_state.qubit == qubit
state_vector = STATES[oneq_state.label][oneq_state.index][:, np.newaxis]
state_matrix = state_vector @ state_vector.conj().T
mat = np.kron(state_matrix, mat)
return mat |
def task_delete(self, **kw):
""" Marks a task as deleted. """
id, task = self.get_task(**kw)
if task['status'] == Status.DELETED:
raise ValueError("Task is already deleted.")
self._execute(id, 'delete')
return self.get_task(uuid=task['uuid'])[1] | Marks a task as deleted. | Below is the the instruction that describes the task:
### Input:
Marks a task as deleted.
### Response:
def task_delete(self, **kw):
""" Marks a task as deleted. """
id, task = self.get_task(**kw)
if task['status'] == Status.DELETED:
raise ValueError("Task is already deleted.")
self._execute(id, 'delete')
return self.get_task(uuid=task['uuid'])[1] |
def runMultiplePassSPonly(df, model, nMultiplePass, nTrain):
"""
run CLA model SP through data record 0:nTrain nMultiplePass passes
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
model._sensorCompute(inputRecord)
model._spCompute()
if j % 400 == 0:
print " pass %i, record %i" % (nPass, j)
return model | run CLA model SP through data record 0:nTrain nMultiplePass passes | Below is the the instruction that describes the task:
### Input:
run CLA model SP through data record 0:nTrain nMultiplePass passes
### Response:
def runMultiplePassSPonly(df, model, nMultiplePass, nTrain):
"""
run CLA model SP through data record 0:nTrain nMultiplePass passes
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
model._sensorCompute(inputRecord)
model._spCompute()
if j % 400 == 0:
print " pass %i, record %i" % (nPass, j)
return model |
def _is_dynamic(v: Var) -> bool:
"""Return True if the Var holds a value which should be compiled to a dynamic
Var access."""
return (
Maybe(v.meta)
.map(lambda m: m.get(SYM_DYNAMIC_META_KEY, None)) # type: ignore
.or_else_get(False)
) | Return True if the Var holds a value which should be compiled to a dynamic
Var access. | Below is the the instruction that describes the task:
### Input:
Return True if the Var holds a value which should be compiled to a dynamic
Var access.
### Response:
def _is_dynamic(v: Var) -> bool:
"""Return True if the Var holds a value which should be compiled to a dynamic
Var access."""
return (
Maybe(v.meta)
.map(lambda m: m.get(SYM_DYNAMIC_META_KEY, None)) # type: ignore
.or_else_get(False)
) |
def _command_line(): # pragma: no cover pylint: disable=too-many-branches,too-many-statements
"""
Provide the command line interface.
"""
if __name__ == "PyFunceble":
# We initiate the end of the coloration at the end of each line.
initiate(autoreset=True)
# We load the configuration and the directory structure.
load_config(True)
try:
# The following handle the command line argument.
try:
PARSER = argparse.ArgumentParser(
epilog="Crafted with %s by %s"
% (
Fore.RED + "♥" + Fore.RESET,
Style.BRIGHT
+ Fore.CYAN
+ "Nissar Chababy (Funilrys) "
+ Style.RESET_ALL
+ "with the help of "
+ Style.BRIGHT
+ Fore.GREEN
+ "https://pyfunceble.rtfd.io/en/master/contributors.html "
+ Style.RESET_ALL
+ "&& "
+ Style.BRIGHT
+ Fore.GREEN
+ "https://pyfunceble.rtfd.io/en/master/special-thanks.html",
),
add_help=False,
)
CURRENT_VALUE_FORMAT = (
Fore.YELLOW + Style.BRIGHT + "Configured value: " + Fore.BLUE
)
PARSER.add_argument(
"-ad",
"--adblock",
action="store_true",
help="Switch the decoding of the adblock format. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["adblock"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-a",
"--all",
action="store_false",
help="Output all available information on the screen. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["less"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"" "-c",
"--auto-continue",
"--continue",
action="store_true",
help="Switch the value of the auto continue mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["auto_continue"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--autosave-minutes",
type=int,
help="Update the minimum of minutes before we start "
"committing to upstream under Travis CI. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis_autosave_minutes"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--clean", action="store_true", help="Clean all files under output."
)
PARSER.add_argument(
"--clean-all",
action="store_true",
help="Clean all files under output and all file generated by PyFunceble.",
)
PARSER.add_argument(
"--cmd",
type=str,
help="Pass a command to run before each commit "
"(except the final one) under the Travis mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["command_before_end"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--cmd-before-end",
type=str,
help="Pass a command to run before the results "
"(final) commit under the Travis mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["command_before_end"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--commit-autosave-message",
type=str,
help="Replace the default autosave commit message. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis_autosave_commit"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--commit-results-message",
type=str,
help="Replace the default results (final) commit message. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis_autosave_final_commit"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-d", "--domain", type=str, help="Set and test the given domain."
)
PARSER.add_argument(
"-db",
"--database",
action="store_true",
help="Switch the value of the usage of a database to store "
"inactive domains of the currently tested list. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["inactive_database"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-dbr",
"--days-between-db-retest",
type=int,
help="Set the numbers of days between each retest of domains present "
"into inactive-db.json. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["days_between_db_retest"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--debug",
action="store_true",
help="Switch the value of the debug mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["debug"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--directory-structure",
action="store_true",
help="Generate the directory and files that are needed and which does "
"not exist in the current directory.",
)
PARSER.add_argument(
"-ex",
"--execution",
action="store_true",
help="Switch the default value of the execution time showing. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["show_execution_time"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-f",
"--file",
type=str,
help="Read the given file and test all domains inside it. "
"If a URL is given we download and test the content of the given URL.", # pylint: disable=line-too-long
)
PARSER.add_argument(
"--filter", type=str, help="Domain to filter (regex)."
)
PARSER.add_argument(
"--help",
action="help",
default=argparse.SUPPRESS,
help="Show this help message and exit.",
)
PARSER.add_argument(
"--hierarchical",
action="store_true",
help="Switch the value of the hierarchical sorting of the tested file. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["hierarchical_sorting"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-h",
"--host",
action="store_true",
help="Switch the value of the generation of hosts file. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["generate_hosts"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--http",
action="store_true",
help="Switch the value of the usage of HTTP code. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(HTTP_CODE["active"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--iana",
action="store_true",
help="Update/Generate `iana-domains-db.json`.",
)
PARSER.add_argument(
"--idna",
action="store_true",
help="Switch the value of the IDNA conversion. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["idna_conversion"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-ip",
type=str,
help="Change the IP to print in the hosts files with the given one. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["custom_ip"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--json",
action="store_true",
help="Switch the value of the generation "
"of the JSON formatted list of domains. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["generate_json"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--less",
action="store_true",
help="Output less informations on screen. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(Core.switch("less"))
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--local",
action="store_true",
help="Switch the value of the local network testing. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(Core.switch("local"))
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--link", type=str, help="Download and test the given file."
)
PARSER.add_argument(
"-m",
"--mining",
action="store_true",
help="Switch the value of the mining subsystem usage. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["mining"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-n",
"--no-files",
action="store_true",
help="Switch the value of the production of output files. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["no_files"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-nl",
"--no-logs",
action="store_true",
help="Switch the value of the production of logs files "
"in the case we encounter some errors. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(not CONFIGURATION["logs"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-ns",
"--no-special",
action="store_true",
help="Switch the value of the usage of the SPECIAL rules. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["no_special"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-nu",
"--no-unified",
action="store_true",
help="Switch the value of the production unified logs "
"under the output directory. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["unified"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-nw",
"--no-whois",
action="store_true",
help="Switch the value the usage of whois to test domain's status. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["no_whois"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-p",
"--percentage",
action="store_true",
help="Switch the value of the percentage output mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["show_percentage"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--plain",
action="store_true",
help="Switch the value of the generation "
"of the plain list of domains. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["plain_list_domain"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--production",
action="store_true",
help="Prepare the repository for production.",
)
PARSER.add_argument(
"-psl",
"--public-suffix",
action="store_true",
help="Update/Generate `public-suffix.json`.",
)
PARSER.add_argument(
"-q",
"--quiet",
action="store_true",
help="Run the script in quiet mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["quiet"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--share-logs",
action="store_true",
help="Switch the value of the sharing of logs. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["share_logs"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-s",
"--simple",
action="store_true",
help="Switch the value of the simple output mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["simple"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--split",
action="store_true",
help="Switch the value of the split of the generated output files. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["inactive_database"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--syntax",
action="store_true",
help="Switch the value of the syntax test mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["syntax"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-t",
"--timeout",
type=int,
default=3,
help="Switch the value of the timeout. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["seconds_before_http_timeout"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--travis",
action="store_true",
help="Switch the value of the Travis mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--travis-branch",
type=str,
default="master",
help="Switch the branch name where we are going to push. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis_branch"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-u", "--url", type=str, help="Analyze the given URL."
)
PARSER.add_argument(
"-uf",
"--url-file",
type=str,
help="Read and test the list of URL of the given file. "
"If a URL is given we download and test the content of the given URL.", # pylint: disable=line-too-long
)
PARSER.add_argument(
"-ua",
"--user-agent",
type=str,
help="Set the user-agent to use and set every time we "
"interact with everything which is not our logs sharing system.", # pylint: disable=line-too-long
)
PARSER.add_argument(
"-v",
"--version",
help="Show the version of PyFunceble and exit.",
action="version",
version="%(prog)s " + VERSION,
)
PARSER.add_argument(
"-vsc",
"--verify-ssl-certificate",
action="store_true",
help="Switch the value of the verification of the "
"SSL/TLS certificate when testing for URL. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["verify_ssl_certificate"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-wdb",
"--whois-database",
action="store_true",
help="Switch the value of the usage of a database to store "
"whois data in order to avoid whois servers rate limit. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["whois_database"])
+ Style.RESET_ALL
),
)
ARGS = PARSER.parse_args()
if ARGS.less:
CONFIGURATION.update({"less": ARGS.less})
elif not ARGS.all:
CONFIGURATION.update({"less": ARGS.all})
if ARGS.adblock:
CONFIGURATION.update({"adblock": Core.switch("adblock")})
if ARGS.auto_continue:
CONFIGURATION.update(
{"auto_continue": Core.switch("auto_continue")}
)
if ARGS.autosave_minutes:
CONFIGURATION.update(
{"travis_autosave_minutes": ARGS.autosave_minutes}
)
if ARGS.clean:
Clean(None)
if ARGS.clean_all:
Clean(None, ARGS.clean_all)
if ARGS.cmd:
CONFIGURATION.update({"command": ARGS.cmd})
if ARGS.cmd_before_end:
CONFIGURATION.update({"command_before_end": ARGS.cmd_before_end})
if ARGS.commit_autosave_message:
CONFIGURATION.update(
{"travis_autosave_commit": ARGS.commit_autosave_message}
)
if ARGS.commit_results_message:
CONFIGURATION.update(
{"travis_autosave_final_commit": ARGS.commit_results_message}
)
if ARGS.database:
CONFIGURATION.update(
{"inactive_database": Core.switch("inactive_database")}
)
if ARGS.days_between_db_retest:
CONFIGURATION.update(
{"days_between_db_retest": ARGS.days_between_db_retest}
)
if ARGS.debug:
CONFIGURATION.update({"debug": Core.switch("debug")})
if ARGS.directory_structure:
DirectoryStructure()
if ARGS.execution:
CONFIGURATION.update(
{"show_execution_time": Core.switch("show_execution_time")}
)
if ARGS.filter:
CONFIGURATION.update({"filter": ARGS.filter})
if ARGS.hierarchical:
CONFIGURATION.update(
{"hierarchical_sorting": Core.switch("hierarchical_sorting")}
)
if ARGS.host:
CONFIGURATION.update(
{"generate_hosts": Core.switch("generate_hosts")}
)
if ARGS.http:
HTTP_CODE.update({"active": Core.switch(HTTP_CODE["active"], True)})
if ARGS.iana:
IANA().update()
if ARGS.idna:
CONFIGURATION.update(
{"idna_conversion": Core.switch("idna_conversion")}
)
if ARGS.ip:
CONFIGURATION.update({"custom_ip": ARGS.ip})
if ARGS.json:
CONFIGURATION.update(
{"generate_json": Core.switch("generate_json")}
)
if ARGS.local:
CONFIGURATION.update({"local": Core.switch("local")})
if ARGS.mining:
CONFIGURATION.update({"mining": Core.switch("mining")})
if ARGS.no_files:
CONFIGURATION.update({"no_files": Core.switch("no_files")})
if ARGS.no_logs:
CONFIGURATION.update({"logs": Core.switch("logs")})
if ARGS.no_special:
CONFIGURATION.update({"no_special": Core.switch("no_special")})
if ARGS.no_unified:
CONFIGURATION.update({"unified": Core.switch("unified")})
if ARGS.no_whois:
CONFIGURATION.update({"no_whois": Core.switch("no_whois")})
if ARGS.percentage:
CONFIGURATION.update(
{"show_percentage": Core.switch("show_percentage")}
)
if ARGS.plain:
CONFIGURATION.update(
{"plain_list_domain": Core.switch("plain_list_domain")}
)
if ARGS.production:
Production()
if ARGS.public_suffix:
PublicSuffix().update()
if ARGS.quiet:
CONFIGURATION.update({"quiet": Core.switch("quiet")})
if ARGS.share_logs:
CONFIGURATION.update({"share_logs": Core.switch("share_logs")})
if ARGS.simple:
CONFIGURATION.update(
{"simple": Core.switch("simple"), "quiet": Core.switch("quiet")}
)
if ARGS.split:
CONFIGURATION.update({"split": Core.switch("split")})
if ARGS.syntax:
CONFIGURATION.update({"syntax": Core.switch("syntax")})
if ARGS.timeout and ARGS.timeout % 3 == 0:
CONFIGURATION.update({"seconds_before_http_timeout": ARGS.timeout})
if ARGS.travis:
CONFIGURATION.update({"travis": Core.switch("travis")})
if ARGS.travis_branch:
CONFIGURATION.update({"travis_branch": ARGS.travis_branch})
if ARGS.user_agent:
CONFIGURATION.update({"user_agent": ARGS.user_agent})
if ARGS.verify_ssl_certificate:
CONFIGURATION.update(
{"verify_ssl_certificate": ARGS.verify_ssl_certificate}
)
if ARGS.whois_database:
CONFIGURATION.update(
{"whois_database": Core.switch("whois_database")}
)
if not CONFIGURATION["quiet"]:
Core.colorify_logo(home=True)
# We compare the versions (upstream and local) and in between.
Version().compare()
# We call our Core which will handle all case depending of the configuration or
# the used command line arguments.
Core(
domain_or_ip_to_test=ARGS.domain,
file_path=ARGS.file,
url_to_test=ARGS.url,
url_file=ARGS.url_file,
link_to_test=ARGS.link,
)
except KeyError as e:
if not Version(True).is_cloned():
# We are not into the cloned version.
# We merge the local with the upstream configuration.
Merge(CURRENT_DIRECTORY)
else:
# We are in the cloned version.
# We raise the exception.
#
# Note: The purpose of this is to avoid having
# to search for a mistake while developing.
raise e
except KeyboardInterrupt:
stay_safe() | Provide the command line interface. | Below is the the instruction that describes the task:
### Input:
Provide the command line interface.
### Response:
def _command_line(): # pragma: no cover pylint: disable=too-many-branches,too-many-statements
"""
Provide the command line interface.
"""
if __name__ == "PyFunceble":
# We initiate the end of the coloration at the end of each line.
initiate(autoreset=True)
# We load the configuration and the directory structure.
load_config(True)
try:
# The following handle the command line argument.
try:
PARSER = argparse.ArgumentParser(
epilog="Crafted with %s by %s"
% (
Fore.RED + "♥" + Fore.RESET,
Style.BRIGHT
+ Fore.CYAN
+ "Nissar Chababy (Funilrys) "
+ Style.RESET_ALL
+ "with the help of "
+ Style.BRIGHT
+ Fore.GREEN
+ "https://pyfunceble.rtfd.io/en/master/contributors.html "
+ Style.RESET_ALL
+ "&& "
+ Style.BRIGHT
+ Fore.GREEN
+ "https://pyfunceble.rtfd.io/en/master/special-thanks.html",
),
add_help=False,
)
CURRENT_VALUE_FORMAT = (
Fore.YELLOW + Style.BRIGHT + "Configured value: " + Fore.BLUE
)
PARSER.add_argument(
"-ad",
"--adblock",
action="store_true",
help="Switch the decoding of the adblock format. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["adblock"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-a",
"--all",
action="store_false",
help="Output all available information on the screen. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["less"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"" "-c",
"--auto-continue",
"--continue",
action="store_true",
help="Switch the value of the auto continue mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["auto_continue"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--autosave-minutes",
type=int,
help="Update the minimum of minutes before we start "
"committing to upstream under Travis CI. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis_autosave_minutes"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--clean", action="store_true", help="Clean all files under output."
)
PARSER.add_argument(
"--clean-all",
action="store_true",
help="Clean all files under output and all file generated by PyFunceble.",
)
PARSER.add_argument(
"--cmd",
type=str,
help="Pass a command to run before each commit "
"(except the final one) under the Travis mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["command_before_end"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--cmd-before-end",
type=str,
help="Pass a command to run before the results "
"(final) commit under the Travis mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["command_before_end"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--commit-autosave-message",
type=str,
help="Replace the default autosave commit message. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis_autosave_commit"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--commit-results-message",
type=str,
help="Replace the default results (final) commit message. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis_autosave_final_commit"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-d", "--domain", type=str, help="Set and test the given domain."
)
PARSER.add_argument(
"-db",
"--database",
action="store_true",
help="Switch the value of the usage of a database to store "
"inactive domains of the currently tested list. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["inactive_database"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-dbr",
"--days-between-db-retest",
type=int,
help="Set the numbers of days between each retest of domains present "
"into inactive-db.json. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["days_between_db_retest"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--debug",
action="store_true",
help="Switch the value of the debug mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["debug"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--directory-structure",
action="store_true",
help="Generate the directory and files that are needed and which does "
"not exist in the current directory.",
)
PARSER.add_argument(
"-ex",
"--execution",
action="store_true",
help="Switch the default value of the execution time showing. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["show_execution_time"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-f",
"--file",
type=str,
help="Read the given file and test all domains inside it. "
"If a URL is given we download and test the content of the given URL.", # pylint: disable=line-too-long
)
PARSER.add_argument(
"--filter", type=str, help="Domain to filter (regex)."
)
PARSER.add_argument(
"--help",
action="help",
default=argparse.SUPPRESS,
help="Show this help message and exit.",
)
PARSER.add_argument(
"--hierarchical",
action="store_true",
help="Switch the value of the hierarchical sorting of the tested file. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["hierarchical_sorting"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-h",
"--host",
action="store_true",
help="Switch the value of the generation of hosts file. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["generate_hosts"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--http",
action="store_true",
help="Switch the value of the usage of HTTP code. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(HTTP_CODE["active"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--iana",
action="store_true",
help="Update/Generate `iana-domains-db.json`.",
)
PARSER.add_argument(
"--idna",
action="store_true",
help="Switch the value of the IDNA conversion. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["idna_conversion"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-ip",
type=str,
help="Change the IP to print in the hosts files with the given one. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["custom_ip"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--json",
action="store_true",
help="Switch the value of the generation "
"of the JSON formatted list of domains. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["generate_json"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--less",
action="store_true",
help="Output less informations on screen. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(Core.switch("less"))
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--local",
action="store_true",
help="Switch the value of the local network testing. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(Core.switch("local"))
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--link", type=str, help="Download and test the given file."
)
PARSER.add_argument(
"-m",
"--mining",
action="store_true",
help="Switch the value of the mining subsystem usage. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["mining"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-n",
"--no-files",
action="store_true",
help="Switch the value of the production of output files. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["no_files"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-nl",
"--no-logs",
action="store_true",
help="Switch the value of the production of logs files "
"in the case we encounter some errors. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(not CONFIGURATION["logs"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-ns",
"--no-special",
action="store_true",
help="Switch the value of the usage of the SPECIAL rules. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["no_special"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-nu",
"--no-unified",
action="store_true",
help="Switch the value of the production unified logs "
"under the output directory. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["unified"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-nw",
"--no-whois",
action="store_true",
help="Switch the value the usage of whois to test domain's status. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["no_whois"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-p",
"--percentage",
action="store_true",
help="Switch the value of the percentage output mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["show_percentage"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--plain",
action="store_true",
help="Switch the value of the generation "
"of the plain list of domains. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["plain_list_domain"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--production",
action="store_true",
help="Prepare the repository for production.",
)
PARSER.add_argument(
"-psl",
"--public-suffix",
action="store_true",
help="Update/Generate `public-suffix.json`.",
)
PARSER.add_argument(
"-q",
"--quiet",
action="store_true",
help="Run the script in quiet mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["quiet"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--share-logs",
action="store_true",
help="Switch the value of the sharing of logs. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["share_logs"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-s",
"--simple",
action="store_true",
help="Switch the value of the simple output mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["simple"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--split",
action="store_true",
help="Switch the value of the split of the generated output files. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["inactive_database"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--syntax",
action="store_true",
help="Switch the value of the syntax test mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["syntax"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-t",
"--timeout",
type=int,
default=3,
help="Switch the value of the timeout. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["seconds_before_http_timeout"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--travis",
action="store_true",
help="Switch the value of the Travis mode. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"--travis-branch",
type=str,
default="master",
help="Switch the branch name where we are going to push. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["travis_branch"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-u", "--url", type=str, help="Analyze the given URL."
)
PARSER.add_argument(
"-uf",
"--url-file",
type=str,
help="Read and test the list of URL of the given file. "
"If a URL is given we download and test the content of the given URL.", # pylint: disable=line-too-long
)
PARSER.add_argument(
"-ua",
"--user-agent",
type=str,
help="Set the user-agent to use and set every time we "
"interact with everything which is not our logs sharing system.", # pylint: disable=line-too-long
)
PARSER.add_argument(
"-v",
"--version",
help="Show the version of PyFunceble and exit.",
action="version",
version="%(prog)s " + VERSION,
)
PARSER.add_argument(
"-vsc",
"--verify-ssl-certificate",
action="store_true",
help="Switch the value of the verification of the "
"SSL/TLS certificate when testing for URL. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["verify_ssl_certificate"])
+ Style.RESET_ALL
),
)
PARSER.add_argument(
"-wdb",
"--whois-database",
action="store_true",
help="Switch the value of the usage of a database to store "
"whois data in order to avoid whois servers rate limit. %s"
% (
CURRENT_VALUE_FORMAT
+ repr(CONFIGURATION["whois_database"])
+ Style.RESET_ALL
),
)
ARGS = PARSER.parse_args()
if ARGS.less:
CONFIGURATION.update({"less": ARGS.less})
elif not ARGS.all:
CONFIGURATION.update({"less": ARGS.all})
if ARGS.adblock:
CONFIGURATION.update({"adblock": Core.switch("adblock")})
if ARGS.auto_continue:
CONFIGURATION.update(
{"auto_continue": Core.switch("auto_continue")}
)
if ARGS.autosave_minutes:
CONFIGURATION.update(
{"travis_autosave_minutes": ARGS.autosave_minutes}
)
if ARGS.clean:
Clean(None)
if ARGS.clean_all:
Clean(None, ARGS.clean_all)
if ARGS.cmd:
CONFIGURATION.update({"command": ARGS.cmd})
if ARGS.cmd_before_end:
CONFIGURATION.update({"command_before_end": ARGS.cmd_before_end})
if ARGS.commit_autosave_message:
CONFIGURATION.update(
{"travis_autosave_commit": ARGS.commit_autosave_message}
)
if ARGS.commit_results_message:
CONFIGURATION.update(
{"travis_autosave_final_commit": ARGS.commit_results_message}
)
if ARGS.database:
CONFIGURATION.update(
{"inactive_database": Core.switch("inactive_database")}
)
if ARGS.days_between_db_retest:
CONFIGURATION.update(
{"days_between_db_retest": ARGS.days_between_db_retest}
)
if ARGS.debug:
CONFIGURATION.update({"debug": Core.switch("debug")})
if ARGS.directory_structure:
DirectoryStructure()
if ARGS.execution:
CONFIGURATION.update(
{"show_execution_time": Core.switch("show_execution_time")}
)
if ARGS.filter:
CONFIGURATION.update({"filter": ARGS.filter})
if ARGS.hierarchical:
CONFIGURATION.update(
{"hierarchical_sorting": Core.switch("hierarchical_sorting")}
)
if ARGS.host:
CONFIGURATION.update(
{"generate_hosts": Core.switch("generate_hosts")}
)
if ARGS.http:
HTTP_CODE.update({"active": Core.switch(HTTP_CODE["active"], True)})
if ARGS.iana:
IANA().update()
if ARGS.idna:
CONFIGURATION.update(
{"idna_conversion": Core.switch("idna_conversion")}
)
if ARGS.ip:
CONFIGURATION.update({"custom_ip": ARGS.ip})
if ARGS.json:
CONFIGURATION.update(
{"generate_json": Core.switch("generate_json")}
)
if ARGS.local:
CONFIGURATION.update({"local": Core.switch("local")})
if ARGS.mining:
CONFIGURATION.update({"mining": Core.switch("mining")})
if ARGS.no_files:
CONFIGURATION.update({"no_files": Core.switch("no_files")})
if ARGS.no_logs:
CONFIGURATION.update({"logs": Core.switch("logs")})
if ARGS.no_special:
CONFIGURATION.update({"no_special": Core.switch("no_special")})
if ARGS.no_unified:
CONFIGURATION.update({"unified": Core.switch("unified")})
if ARGS.no_whois:
CONFIGURATION.update({"no_whois": Core.switch("no_whois")})
if ARGS.percentage:
CONFIGURATION.update(
{"show_percentage": Core.switch("show_percentage")}
)
if ARGS.plain:
CONFIGURATION.update(
{"plain_list_domain": Core.switch("plain_list_domain")}
)
if ARGS.production:
Production()
if ARGS.public_suffix:
PublicSuffix().update()
if ARGS.quiet:
CONFIGURATION.update({"quiet": Core.switch("quiet")})
if ARGS.share_logs:
CONFIGURATION.update({"share_logs": Core.switch("share_logs")})
if ARGS.simple:
CONFIGURATION.update(
{"simple": Core.switch("simple"), "quiet": Core.switch("quiet")}
)
if ARGS.split:
CONFIGURATION.update({"split": Core.switch("split")})
if ARGS.syntax:
CONFIGURATION.update({"syntax": Core.switch("syntax")})
if ARGS.timeout and ARGS.timeout % 3 == 0:
CONFIGURATION.update({"seconds_before_http_timeout": ARGS.timeout})
if ARGS.travis:
CONFIGURATION.update({"travis": Core.switch("travis")})
if ARGS.travis_branch:
CONFIGURATION.update({"travis_branch": ARGS.travis_branch})
if ARGS.user_agent:
CONFIGURATION.update({"user_agent": ARGS.user_agent})
if ARGS.verify_ssl_certificate:
CONFIGURATION.update(
{"verify_ssl_certificate": ARGS.verify_ssl_certificate}
)
if ARGS.whois_database:
CONFIGURATION.update(
{"whois_database": Core.switch("whois_database")}
)
if not CONFIGURATION["quiet"]:
Core.colorify_logo(home=True)
# We compare the versions (upstream and local) and in between.
Version().compare()
# We call our Core which will handle all case depending of the configuration or
# the used command line arguments.
Core(
domain_or_ip_to_test=ARGS.domain,
file_path=ARGS.file,
url_to_test=ARGS.url,
url_file=ARGS.url_file,
link_to_test=ARGS.link,
)
except KeyError as e:
if not Version(True).is_cloned():
# We are not into the cloned version.
# We merge the local with the upstream configuration.
Merge(CURRENT_DIRECTORY)
else:
# We are in the cloned version.
# We raise the exception.
#
# Note: The purpose of this is to avoid having
# to search for a mistake while developing.
raise e
except KeyboardInterrupt:
stay_safe() |
def clear_completion(self):
        """Clears the completion.
        raise: NoAccess - ``Metadata.isRequired()`` or
        ``Metadata.isReadOnly()`` is ``true``
        *compliance: mandatory -- This method must be implemented.*
        """
        # Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score
        # Clearing is refused when the metadata marks this field as read-only
        # or required (each check re-fetches the metadata, as in the template).
        if (self.get_completion_metadata().is_read_only() or
                self.get_completion_metadata().is_required()):
            raise errors.NoAccess()
        # Reset the stored value back to this form's default for 'completion'.
        self._my_map['completion'] = self._completion_default | Clears the completion.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Clears the completion.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
### Response:
def clear_completion(self):
"""Clears the completion.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score
if (self.get_completion_metadata().is_read_only() or
self.get_completion_metadata().is_required()):
raise errors.NoAccess()
self._my_map['completion'] = self._completion_default |
def plotOneInferenceRun(stats,
                        fields,
                        basename,
                        itemType="",
                        plotDir="plots",
                        ymax=100,
                        trialNumber=0):
  """
  Plots individual inference runs.

  :param stats: dict of traces; the key "<field name> C0" is read for every
      entry of `fields`, and stats["numSteps"] determines the x-axis ticks.
  :param fields: sequence of (field name, plot label) pairs.
  :param basename: prefix of the generated PDF file name.
  :param itemType: text interpolated into the plot title.
  :param plotDir: output directory, created if it does not exist yet.
  :param ymax: upper y-axis limit (the lower limit is fixed at -5).
  :param trialNumber: suffix of the generated PDF file name.
  """
  if not os.path.exists(plotDir):
    os.makedirs(plotDir)
  plt.figure()
  # plot request stats: one curve per requested field, keyed "<name> C0"
  for field in fields:
    fieldKey = field[0] + " C0"
    plt.plot(stats[fieldKey], marker='+', label=field[1])
  # format
  plt.legend(loc="upper right")
  plt.xlabel("Input number")
  plt.xticks(range(stats["numSteps"]))
  plt.ylabel("Number of cells")
  plt.ylim(-5, ymax)
  plt.title("Activity while inferring {}".format(itemType))
  # save the figure as "<basename>_exp_<trialNumber>.pdf" inside plotDir
  relPath = "{}_exp_{}.pdf".format(basename, trialNumber)
  path = os.path.join(plotDir, relPath)
  plt.savefig(path)
  plt.close() | Plots individual inference runs.
### Input:
Plots individual inference runs.
### Response:
def plotOneInferenceRun(stats,
fields,
basename,
itemType="",
plotDir="plots",
ymax=100,
trialNumber=0):
"""
Plots individual inference runs.
"""
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
# plot request stats
for field in fields:
fieldKey = field[0] + " C0"
plt.plot(stats[fieldKey], marker='+', label=field[1])
# format
plt.legend(loc="upper right")
plt.xlabel("Input number")
plt.xticks(range(stats["numSteps"]))
plt.ylabel("Number of cells")
plt.ylim(-5, ymax)
plt.title("Activity while inferring {}".format(itemType))
# save
relPath = "{}_exp_{}.pdf".format(basename, trialNumber)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close() |
def record_close(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /record-xxxx/close API method.
    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Data-Object-Lifecycle#API-method%3A-%2Fclass-xxxx%2Fclose
    """
    # NOTE(review): ``input_params={}`` is a shared mutable default; harmless
    # as written because the dict is only forwarded, never mutated here.
    return DXHTTPRequest('/%s/close' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /record-xxxx/close API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Data-Object-Lifecycle#API-method%3A-%2Fclass-xxxx%2Fclose | Below is the instruction that describes the task:
### Input:
Invokes the /record-xxxx/close API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Data-Object-Lifecycle#API-method%3A-%2Fclass-xxxx%2Fclose
### Response:
def record_close(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /record-xxxx/close API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Data-Object-Lifecycle#API-method%3A-%2Fclass-xxxx%2Fclose
"""
return DXHTTPRequest('/%s/close' % object_id, input_params, always_retry=always_retry, **kwargs) |
def ticker(self, contract: Contract) -> Ticker:
        """
        Get ticker of the given contract. It must have been requested before
        with reqMktData with the same contract object. The ticker may not be
        ready yet if called directly after :meth:`.reqMktData`.

        Args:
            contract: Contract to get ticker for.

        Returns:
            The stored Ticker, or ``None`` when nothing is stored under this
            contract instance.
        """
        # The lookup is keyed on the contract object's identity (``id``), so
        # the very same Contract instance given to reqMktData must be passed;
        # presumably ``tickers`` is a plain dict, whose .get yields None for
        # an absent key.
        return self.wrapper.tickers.get(id(contract)) | Get ticker of the given contract. It must have been requested before
with reqMktData with the same contract object. The ticker may not be
ready yet if called directly after :meth:`.reqMktData`.
Args:
contract: Contract to get ticker for. | Below is the the instruction that describes the task:
### Input:
Get ticker of the given contract. It must have been requested before
with reqMktData with the same contract object. The ticker may not be
ready yet if called directly after :meth:`.reqMktData`.
Args:
contract: Contract to get ticker for.
### Response:
def ticker(self, contract: Contract) -> Ticker:
"""
Get ticker of the given contract. It must have been requested before
with reqMktData with the same contract object. The ticker may not be
ready yet if called directly after :meth:`.reqMktData`.
Args:
contract: Contract to get ticker for.
"""
return self.wrapper.tickers.get(id(contract)) |
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
    """Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2d numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.
    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.
    dtype : string, type or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.
    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]
    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"
    # Sparse and dense inputs take separate conversion/validation paths.
    if sp.issparse(array):
        if dtype_numeric:
            dtype = None
        array = _ensure_sparse_format(array, accept_sparse, dtype, order,
                                      copy, force_all_finite)
    else:
        if ensure_2d:
            array = np.atleast_2d(array)
        if dtype_numeric:
            if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
                # if input is object, convert to float.
                dtype = np.float64
            else:
                dtype = None
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. Expected <= 2" %
                             array.ndim)
        if force_all_finite:
            _assert_all_finite(array)
    # Build the shape string once; reused by both error messages below.
    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required."
                             % (n_samples, shape_repr, ensure_min_samples))
    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required."
                             % (n_features, shape_repr, ensure_min_features))
    return array | Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2nd numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
        The converted and validated X. | Below is the instruction that describes the task:
### Input:
Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2nd numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X.
### Response:
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2nd numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
if sp.issparse(array):
if dtype_numeric:
dtype = None
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
if dtype_numeric:
if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
return array |
def send_by_packet(self, data):
        """
        Send data by packet on socket
        """
        # Keep writing until one full packet (PACKET_SIZE bytes) has gone out;
        # socket.send may accept fewer bytes than offered, hence the loop.
        total_sent = 0
        while total_sent < PACKET_SIZE:
            sent = self.sock.send(data[total_sent:])
            # send() returning 0 is treated as a closed/broken connection.
            if sent == 0:
                raise RuntimeError("socket connection broken")
            total_sent += sent
        # NOTE(review): the loop bound is PACKET_SIZE, not len(data). If data
        # is shorter than PACKET_SIZE, send() on the empty tail returns 0 and
        # this raises -- presumably callers always pass exactly PACKET_SIZE
        # bytes; confirm against call sites.
        return total_sent | Send data by packet on socket
### Input:
Send data by packet on socket
### Response:
def send_by_packet(self, data):
"""
Send data by packet on socket
"""
total_sent = 0
while total_sent < PACKET_SIZE:
sent = self.sock.send(data[total_sent:])
if sent == 0:
raise RuntimeError("socket connection broken")
total_sent += sent
return total_sent |
def special_target_typecheck(value):
    """
    Special type checking for the target object
    :param value: <dict>
    :return: <bool>
    """
    result = True
    # if key:Optional exists, it must be a dict object
    # (a missing key falls back to an empty dict, which passes the check)
    result &= isinstance(value.get(TARGET_OPTIONAL_FIELD, dict()), dict)
    return result | Special type checking for the target object
:param value: <dict>
:return: <bool> | Below is the the instruction that describes the task:
### Input:
Special type checking for the target object
:param value: <dict>
:return: <bool>
### Response:
def special_target_typecheck(value):
"""
Special type checking for the target object
:param value: <dict>
:return: <bool>
"""
result = True
# if key:Optional exists, it must be a dict object
result &= isinstance(value.get(TARGET_OPTIONAL_FIELD, dict()), dict)
return result |
def register(self, name, description, obj, plugin):
"""
Registers a new shared object.
:param name: Unique name for shared object
:type name: str
:param description: Description of shared object
:type description: str
:param obj: The object, which shall be shared
:type obj: any type
:param plugin: Plugin, which registers the new shared object
"""
if name in self._shared_objects.keys():
raise SharedObjectExistException("Shared Object %s already registered by %s"
% (name, self._shared_objects[name].plugin.name))
new_shared_object = SharedObject(name, description, obj, plugin)
self._shared_objects[name] = new_shared_object
self.log.debug("Shared object registered: %s" % name)
return new_shared_object | Registers a new shared object.
:param name: Unique name for shared object
:type name: str
:param description: Description of shared object
:type description: str
:param obj: The object, which shall be shared
:type obj: any type
:param plugin: Plugin, which registers the new shared object | Below is the the instruction that describes the task:
### Input:
Registers a new shared object.
:param name: Unique name for shared object
:type name: str
:param description: Description of shared object
:type description: str
:param obj: The object, which shall be shared
:type obj: any type
:param plugin: Plugin, which registers the new shared object
### Response:
def register(self, name, description, obj, plugin):
"""
Registers a new shared object.
:param name: Unique name for shared object
:type name: str
:param description: Description of shared object
:type description: str
:param obj: The object, which shall be shared
:type obj: any type
:param plugin: Plugin, which registers the new shared object
"""
if name in self._shared_objects.keys():
raise SharedObjectExistException("Shared Object %s already registered by %s"
% (name, self._shared_objects[name].plugin.name))
new_shared_object = SharedObject(name, description, obj, plugin)
self._shared_objects[name] = new_shared_object
self.log.debug("Shared object registered: %s" % name)
return new_shared_object |
def funname(file):
"""Return variable names from file names."""
if isinstance(file, str):
files = [file]
else:
files = file
bases = [os.path.basename(f) for f in files]
names = [os.path.splitext(b)[0] for b in bases]
if isinstance(file, str):
return names[0]
else:
return names | Return variable names from file names. | Below is the the instruction that describes the task:
### Input:
Return variable names from file names.
### Response:
def funname(file):
"""Return variable names from file names."""
if isinstance(file, str):
files = [file]
else:
files = file
bases = [os.path.basename(f) for f in files]
names = [os.path.splitext(b)[0] for b in bases]
if isinstance(file, str):
return names[0]
else:
return names |
def join_locale(comps):
'''
Join a locale specifier split in the format returned by split_locale.
'''
loc = comps['language']
if comps.get('territory'):
loc += '_' + comps['territory']
if comps.get('codeset'):
loc += '.' + comps['codeset']
if comps.get('modifier'):
loc += '@' + comps['modifier']
if comps.get('charmap'):
loc += ' ' + comps['charmap']
return loc | Join a locale specifier split in the format returned by split_locale. | Below is the the instruction that describes the task:
### Input:
Join a locale specifier split in the format returned by split_locale.
### Response:
def join_locale(comps):
'''
Join a locale specifier split in the format returned by split_locale.
'''
loc = comps['language']
if comps.get('territory'):
loc += '_' + comps['territory']
if comps.get('codeset'):
loc += '.' + comps['codeset']
if comps.get('modifier'):
loc += '@' + comps['modifier']
if comps.get('charmap'):
loc += ' ' + comps['charmap']
return loc |
def list_objects(self, path='', relative=False, first_level=False,
max_request_entries=None):
"""
List objects.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): It True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
"""
entries = 0
next_values = []
max_request_entries_arg = None
if not relative:
path = self.relpath(path)
# From root
if not path:
objects = self._list_locators()
# Sub directory
else:
objects = self._list_objects(
self.get_client_kwargs(path), max_request_entries)
# Yield file hierarchy
for obj in objects:
# Generate first level objects entries
try:
name, header, is_directory = obj
except ValueError:
# Locators
name, header = obj
is_directory = True
# Start to generate subdirectories content
if is_directory and not first_level:
name = next_path = name.rstrip('/') + '/'
if path:
next_path = '/'.join((path.rstrip('/'), name))
if max_request_entries is not None:
max_request_entries_arg = max_request_entries - entries
next_values.append((
name, self._generate_async(self.list_objects(
next_path, relative=True,
max_request_entries=max_request_entries_arg))))
entries += 1
yield name, header
if entries == max_request_entries:
return
for next_name, generator in next_values:
# Generate other levels objects entries
for name, header in generator:
entries += 1
yield '/'.join((next_name.rstrip('/'), name)), header
if entries == max_request_entries:
return | List objects.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): It True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict | Below is the the instruction that describes the task:
### Input:
List objects.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): It True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
### Response:
def list_objects(self, path='', relative=False, first_level=False,
max_request_entries=None):
"""
List objects.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): It True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
"""
entries = 0
next_values = []
max_request_entries_arg = None
if not relative:
path = self.relpath(path)
# From root
if not path:
objects = self._list_locators()
# Sub directory
else:
objects = self._list_objects(
self.get_client_kwargs(path), max_request_entries)
# Yield file hierarchy
for obj in objects:
# Generate first level objects entries
try:
name, header, is_directory = obj
except ValueError:
# Locators
name, header = obj
is_directory = True
# Start to generate subdirectories content
if is_directory and not first_level:
name = next_path = name.rstrip('/') + '/'
if path:
next_path = '/'.join((path.rstrip('/'), name))
if max_request_entries is not None:
max_request_entries_arg = max_request_entries - entries
next_values.append((
name, self._generate_async(self.list_objects(
next_path, relative=True,
max_request_entries=max_request_entries_arg))))
entries += 1
yield name, header
if entries == max_request_entries:
return
for next_name, generator in next_values:
# Generate other levels objects entries
for name, header in generator:
entries += 1
yield '/'.join((next_name.rstrip('/'), name)), header
if entries == max_request_entries:
return |
def argmax(self):
"""Index of the maximum, ignorning nans."""
if "argmax" not in self.attrs.keys():
def f(dataset, s):
arr = dataset[s]
try:
amin = np.nanargmax(arr)
except ValueError:
amin = 0
idx = np.unravel_index(amin, arr.shape)
val = arr[idx]
return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)
chunk_res = self.chunkwise(f)
idxs = [i[0] for i in chunk_res.values()]
vals = [i[1] for i in chunk_res.values()]
self.attrs["argmax"] = idxs[np.nanargmax(vals)]
return tuple(self.attrs["argmax"]) | Index of the maximum, ignorning nans. | Below is the the instruction that describes the task:
### Input:
Index of the maximum, ignorning nans.
### Response:
def argmax(self):
"""Index of the maximum, ignorning nans."""
if "argmax" not in self.attrs.keys():
def f(dataset, s):
arr = dataset[s]
try:
amin = np.nanargmax(arr)
except ValueError:
amin = 0
idx = np.unravel_index(amin, arr.shape)
val = arr[idx]
return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)
chunk_res = self.chunkwise(f)
idxs = [i[0] for i in chunk_res.values()]
vals = [i[1] for i in chunk_res.values()]
self.attrs["argmax"] = idxs[np.nanargmax(vals)]
return tuple(self.attrs["argmax"]) |
def evaluate(best_processed_path, model):
"""
Evaluate model on splitted 10 percent testing set
"""
x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')
y_predict = model.predict([x_test_char, x_test_type])
y_predict = (y_predict.ravel() > 0.5).astype(int)
f1score = f1_score(y_test, y_predict)
precision = precision_score(y_test, y_predict)
recall = recall_score(y_test, y_predict)
return f1score, precision, recall | Evaluate model on splitted 10 percent testing set | Below is the the instruction that describes the task:
### Input:
Evaluate model on splitted 10 percent testing set
### Response:
def evaluate(best_processed_path, model):
"""
Evaluate model on splitted 10 percent testing set
"""
x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')
y_predict = model.predict([x_test_char, x_test_type])
y_predict = (y_predict.ravel() > 0.5).astype(int)
f1score = f1_score(y_test, y_predict)
precision = precision_score(y_test, y_predict)
recall = recall_score(y_test, y_predict)
return f1score, precision, recall |
def get_encryption_key_status(self):
"""Read information about the current encryption key used by Vault.
Supported methods:
GET: /sys/key-status. Produces: 200 application/json
:return: JSON response with information regarding the current encryption key used by Vault.
:rtype: dict
"""
api_path = '/v1/sys/key-status'
response = self._adapter.get(
url=api_path,
)
return response.json() | Read information about the current encryption key used by Vault.
Supported methods:
GET: /sys/key-status. Produces: 200 application/json
:return: JSON response with information regarding the current encryption key used by Vault.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Read information about the current encryption key used by Vault.
Supported methods:
GET: /sys/key-status. Produces: 200 application/json
:return: JSON response with information regarding the current encryption key used by Vault.
:rtype: dict
### Response:
def get_encryption_key_status(self):
"""Read information about the current encryption key used by Vault.
Supported methods:
GET: /sys/key-status. Produces: 200 application/json
:return: JSON response with information regarding the current encryption key used by Vault.
:rtype: dict
"""
api_path = '/v1/sys/key-status'
response = self._adapter.get(
url=api_path,
)
return response.json() |
def current(cls):
"""Get the current event loop singleton object.
"""
try:
return _tls.loop
except AttributeError:
# create loop only for main thread
if threading.current_thread().name == 'MainThread':
_tls.loop = cls()
return _tls.loop
raise RuntimeError('there is no event loop created in the current thread') | Get the current event loop singleton object. | Below is the the instruction that describes the task:
### Input:
Get the current event loop singleton object.
### Response:
def current(cls):
"""Get the current event loop singleton object.
"""
try:
return _tls.loop
except AttributeError:
# create loop only for main thread
if threading.current_thread().name == 'MainThread':
_tls.loop = cls()
return _tls.loop
raise RuntimeError('there is no event loop created in the current thread') |
def offset_gen(offset, iterable, skip_signal=None):
'''A generator that applies an `offset`, skipping `offset` elements from
`iterable`. If skip_signal is a callable, it will be called with every
skipped element.
'''
offset = int(offset)
assert offset >= 0, 'negative offset'
for item in iterable:
if offset > 0:
offset -= 1
if callable(skip_signal):
skip_signal(item)
else:
yield item | A generator that applies an `offset`, skipping `offset` elements from
`iterable`. If skip_signal is a callable, it will be called with every
skipped element. | Below is the the instruction that describes the task:
### Input:
A generator that applies an `offset`, skipping `offset` elements from
`iterable`. If skip_signal is a callable, it will be called with every
skipped element.
### Response:
def offset_gen(offset, iterable, skip_signal=None):
'''A generator that applies an `offset`, skipping `offset` elements from
`iterable`. If skip_signal is a callable, it will be called with every
skipped element.
'''
offset = int(offset)
assert offset >= 0, 'negative offset'
for item in iterable:
if offset > 0:
offset -= 1
if callable(skip_signal):
skip_signal(item)
else:
yield item |
def _add_resource_descriptions_to_pools(self, meta_list):
"""
Takes a list of resource descriptions adding them
to the resource pool they belong to scheduling them for loading.
"""
if not meta_list:
return
for meta in meta_list:
getattr(resources, meta.resource_type).add(meta) | Takes a list of resource descriptions adding them
to the resource pool they belong to scheduling them for loading. | Below is the the instruction that describes the task:
### Input:
Takes a list of resource descriptions adding them
to the resource pool they belong to scheduling them for loading.
### Response:
def _add_resource_descriptions_to_pools(self, meta_list):
"""
Takes a list of resource descriptions adding them
to the resource pool they belong to scheduling them for loading.
"""
if not meta_list:
return
for meta in meta_list:
getattr(resources, meta.resource_type).add(meta) |
def FetchMostRecentGraphSeries(label,
report_type,
token = None
):
"""Fetches the latest graph series for a client label from the DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the legacy (non-relational)
datastore.
Raises:
AFF4AttributeTypeError: If, when reading to the legacy DB, an unexpected
report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
"""
if _ShouldUseLegacyDatastore():
return _FetchMostRecentGraphSeriesFromTheLegacyDB(
label, report_type, token=token)
return data_store.REL_DB.ReadMostRecentClientGraphSeries(label, report_type) | Fetches the latest graph series for a client label from the DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the legacy (non-relational)
datastore.
Raises:
AFF4AttributeTypeError: If, when reading to the legacy DB, an unexpected
report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist. | Below is the the instruction that describes the task:
### Input:
Fetches the latest graph series for a client label from the DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the legacy (non-relational)
datastore.
Raises:
AFF4AttributeTypeError: If, when reading to the legacy DB, an unexpected
report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
### Response:
def FetchMostRecentGraphSeries(label,
report_type,
token = None
):
"""Fetches the latest graph series for a client label from the DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the legacy (non-relational)
datastore.
Raises:
AFF4AttributeTypeError: If, when reading to the legacy DB, an unexpected
report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
"""
if _ShouldUseLegacyDatastore():
return _FetchMostRecentGraphSeriesFromTheLegacyDB(
label, report_type, token=token)
return data_store.REL_DB.ReadMostRecentClientGraphSeries(label, report_type) |
def format(self, node_formatter=None, separator='/'):
# type: (Optional[Callable[[PathParam], str]]) -> str
"""
Format a URL path.
An optional `node_parser(PathNode)` can be supplied for converting a
`PathNode` into a string to support the current web framework.
"""
if self._nodes == ('',):
return separator
else:
node_formatter = node_formatter or self.odinweb_node_formatter
return separator.join(node_formatter(n) if isinstance(n, PathParam) else n for n in self._nodes) | Format a URL path.
An optional `node_parser(PathNode)` can be supplied for converting a
`PathNode` into a string to support the current web framework. | Below is the the instruction that describes the task:
### Input:
Format a URL path.
An optional `node_parser(PathNode)` can be supplied for converting a
`PathNode` into a string to support the current web framework.
### Response:
def format(self, node_formatter=None, separator='/'):
# type: (Optional[Callable[[PathParam], str]]) -> str
"""
Format a URL path.
An optional `node_parser(PathNode)` can be supplied for converting a
`PathNode` into a string to support the current web framework.
"""
if self._nodes == ('',):
return separator
else:
node_formatter = node_formatter or self.odinweb_node_formatter
return separator.join(node_formatter(n) if isinstance(n, PathParam) else n for n in self._nodes) |
def run_command(cmd, *args):
"""
Runs command on the system with given ``args``.
"""
command = ' '.join((cmd, args))
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return p.retcode, stdout, stderr | Runs command on the system with given ``args``. | Below is the the instruction that describes the task:
### Input:
Runs command on the system with given ``args``.
### Response:
def run_command(cmd, *args):
"""
Runs command on the system with given ``args``.
"""
command = ' '.join((cmd, args))
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return p.retcode, stdout, stderr |
def string_to_general_float(s: str) -> float:
"""
Convert a string to corresponding single or double precision scientific number.
:param s: a string could be '0.1', '1e-5', '1.0D-5', or any other validated number
:return: a float or raise an error
.. doctest::
>>> string_to_general_float('1.0D-5')
1e-05
>>> string_to_general_float('1Dx')
Traceback (most recent call last):
...
ValueError: The string '1Dx' does not corresponds to a double precision number!
>>> string_to_general_float('.8d234')
8e+233
>>> string_to_general_float('0.1')
0.1
"""
if 'D' in s.upper(): # Possible double precision number
try:
return string_to_double_precision_float(s)
except ValueError:
raise ValueError(
"The string '{0}' does not corresponds to a double precision number!".format(s))
else:
return float(s) | Convert a string to corresponding single or double precision scientific number.
:param s: a string could be '0.1', '1e-5', '1.0D-5', or any other validated number
:return: a float or raise an error
.. doctest::
>>> string_to_general_float('1.0D-5')
1e-05
>>> string_to_general_float('1Dx')
Traceback (most recent call last):
...
ValueError: The string '1Dx' does not corresponds to a double precision number!
>>> string_to_general_float('.8d234')
8e+233
>>> string_to_general_float('0.1')
0.1 | Below is the the instruction that describes the task:
### Input:
Convert a string to corresponding single or double precision scientific number.
:param s: a string could be '0.1', '1e-5', '1.0D-5', or any other validated number
:return: a float or raise an error
.. doctest::
>>> string_to_general_float('1.0D-5')
1e-05
>>> string_to_general_float('1Dx')
Traceback (most recent call last):
...
ValueError: The string '1Dx' does not corresponds to a double precision number!
>>> string_to_general_float('.8d234')
8e+233
>>> string_to_general_float('0.1')
0.1
### Response:
def string_to_general_float(s: str) -> float:
"""
Convert a string to corresponding single or double precision scientific number.
:param s: a string could be '0.1', '1e-5', '1.0D-5', or any other validated number
:return: a float or raise an error
.. doctest::
>>> string_to_general_float('1.0D-5')
1e-05
>>> string_to_general_float('1Dx')
Traceback (most recent call last):
...
ValueError: The string '1Dx' does not corresponds to a double precision number!
>>> string_to_general_float('.8d234')
8e+233
>>> string_to_general_float('0.1')
0.1
"""
if 'D' in s.upper(): # Possible double precision number
try:
return string_to_double_precision_float(s)
except ValueError:
raise ValueError(
"The string '{0}' does not corresponds to a double precision number!".format(s))
else:
return float(s) |
def rgb_color_list_to_hex(color_list):
"""
Convert a list of RGBa colors to a list of hexadecimal color codes.
Parameters
----------
color_list : list
the list of RGBa colors
Returns
-------
color_list_hex : list
"""
color_list_rgb = [[int(x*255) for x in c[0:3]] for c in color_list]
color_list_hex = ['#{:02X}{:02X}{:02X}'.format(rgb[0], rgb[1], rgb[2]) for rgb in color_list_rgb]
return color_list_hex | Convert a list of RGBa colors to a list of hexadecimal color codes.
Parameters
----------
color_list : list
the list of RGBa colors
Returns
-------
color_list_hex : list | Below is the the instruction that describes the task:
### Input:
Convert a list of RGBa colors to a list of hexadecimal color codes.
Parameters
----------
color_list : list
the list of RGBa colors
Returns
-------
color_list_hex : list
### Response:
def rgb_color_list_to_hex(color_list):
"""
Convert a list of RGBa colors to a list of hexadecimal color codes.
Parameters
----------
color_list : list
the list of RGBa colors
Returns
-------
color_list_hex : list
"""
color_list_rgb = [[int(x*255) for x in c[0:3]] for c in color_list]
color_list_hex = ['#{:02X}{:02X}{:02X}'.format(rgb[0], rgb[1], rgb[2]) for rgb in color_list_rgb]
return color_list_hex |
def __remove_actions(self):
"""
Removes actions.
"""
LOGGER.debug("> Removing '{0}' Component actions.".format(self.__class__.__name__))
remove_project_action = "Actions|Umbra|Components|factory.script_editor|&File|Remove Project"
self.__script_editor.command_menu.removeAction(self.__engine.actions_manager.get_action(remove_project_action))
self.__engine.actions_manager.unregister_action(remove_project_action) | Removes actions. | Below is the the instruction that describes the task:
### Input:
Removes actions.
### Response:
def __remove_actions(self):
"""
Removes actions.
"""
LOGGER.debug("> Removing '{0}' Component actions.".format(self.__class__.__name__))
remove_project_action = "Actions|Umbra|Components|factory.script_editor|&File|Remove Project"
self.__script_editor.command_menu.removeAction(self.__engine.actions_manager.get_action(remove_project_action))
self.__engine.actions_manager.unregister_action(remove_project_action) |
def from_file(cls, fname, version=None, require_https=True):
"""
Create a Hive object based on JSON located in a local file.
"""
if os.path.exists(fname):
with open(fname) as hive_file:
return cls(**json.load(hive_file)).from_version(version, require_https=require_https)
else:
raise MissingHive(fname) | Create a Hive object based on JSON located in a local file. | Below is the the instruction that describes the task:
### Input:
Create a Hive object based on JSON located in a local file.
### Response:
def from_file(cls, fname, version=None, require_https=True):
"""
Create a Hive object based on JSON located in a local file.
"""
if os.path.exists(fname):
with open(fname) as hive_file:
return cls(**json.load(hive_file)).from_version(version, require_https=require_https)
else:
raise MissingHive(fname) |
def _ProcessArtifactFilesSource(self, source):
"""Get artifact responses, extract paths and send corresponding files."""
if source.path_type != rdf_paths.PathSpec.PathType.OS:
raise ValueError("Only supported path type is OS.")
# TODO(user): Check paths for GlobExpressions.
# If it contains a * then FileFinder will interpret it as GlobExpression and
# expand it. FileFinderArgs needs an option to treat paths literally.
paths = []
pathspec_attribute = source.base_source.attributes.get("pathspec_attribute")
for source_result_list in self._ProcessSources(
source.artifact_sources, parser_factory=None):
for response in source_result_list:
path = _ExtractPath(response, pathspec_attribute)
if path is not None:
paths.append(path)
file_finder_action = rdf_file_finder.FileFinderAction.Download()
request = rdf_file_finder.FileFinderArgs(
paths=paths, pathtype=source.path_type, action=file_finder_action)
action = file_finder.FileFinderOSFromClient
yield action, request | Get artifact responses, extract paths and send corresponding files. | Below is the the instruction that describes the task:
### Input:
Get artifact responses, extract paths and send corresponding files.
### Response:
def _ProcessArtifactFilesSource(self, source):
"""Get artifact responses, extract paths and send corresponding files."""
if source.path_type != rdf_paths.PathSpec.PathType.OS:
raise ValueError("Only supported path type is OS.")
# TODO(user): Check paths for GlobExpressions.
# If it contains a * then FileFinder will interpret it as GlobExpression and
# expand it. FileFinderArgs needs an option to treat paths literally.
paths = []
pathspec_attribute = source.base_source.attributes.get("pathspec_attribute")
for source_result_list in self._ProcessSources(
source.artifact_sources, parser_factory=None):
for response in source_result_list:
path = _ExtractPath(response, pathspec_attribute)
if path is not None:
paths.append(path)
file_finder_action = rdf_file_finder.FileFinderAction.Download()
request = rdf_file_finder.FileFinderArgs(
paths=paths, pathtype=source.path_type, action=file_finder_action)
action = file_finder.FileFinderOSFromClient
yield action, request |
def p_static_non_empty_array_pair_list_pair(p):
'''static_non_empty_array_pair_list : static_non_empty_array_pair_list COMMA static_scalar DOUBLE_ARROW static_scalar
| static_scalar DOUBLE_ARROW static_scalar'''
if len(p) == 6:
p[0] = p[1] + [ast.ArrayElement(p[3], p[5], False, lineno=p.lineno(2))]
else:
p[0] = [ast.ArrayElement(p[1], p[3], False, lineno=p.lineno(2))] | static_non_empty_array_pair_list : static_non_empty_array_pair_list COMMA static_scalar DOUBLE_ARROW static_scalar
| static_scalar DOUBLE_ARROW static_scalar | Below is the the instruction that describes the task:
### Input:
static_non_empty_array_pair_list : static_non_empty_array_pair_list COMMA static_scalar DOUBLE_ARROW static_scalar
| static_scalar DOUBLE_ARROW static_scalar
### Response:
def p_static_non_empty_array_pair_list_pair(p):
'''static_non_empty_array_pair_list : static_non_empty_array_pair_list COMMA static_scalar DOUBLE_ARROW static_scalar
| static_scalar DOUBLE_ARROW static_scalar'''
if len(p) == 6:
p[0] = p[1] + [ast.ArrayElement(p[3], p[5], False, lineno=p.lineno(2))]
else:
p[0] = [ast.ArrayElement(p[1], p[3], False, lineno=p.lineno(2))] |
def _client(self, host, port, unix_socket, auth):
"""Return a redis client for the configuration.
:param str host: redis host
:param int port: redis port
:rtype: redis.Redis
"""
db = int(self.config['db'])
timeout = int(self.config['timeout'])
try:
cli = redis.Redis(host=host, port=port,
db=db, socket_timeout=timeout, password=auth,
unix_socket_path=unix_socket)
cli.ping()
return cli
except Exception as ex:
self.log.error("RedisCollector: failed to connect to %s:%i. %s.",
unix_socket or host, port, ex) | Return a redis client for the configuration.
:param str host: redis host
:param int port: redis port
:rtype: redis.Redis | Below is the the instruction that describes the task:
### Input:
Return a redis client for the configuration.
:param str host: redis host
:param int port: redis port
:rtype: redis.Redis
### Response:
def _client(self, host, port, unix_socket, auth):
"""Return a redis client for the configuration.
:param str host: redis host
:param int port: redis port
:rtype: redis.Redis
"""
db = int(self.config['db'])
timeout = int(self.config['timeout'])
try:
cli = redis.Redis(host=host, port=port,
db=db, socket_timeout=timeout, password=auth,
unix_socket_path=unix_socket)
cli.ping()
return cli
except Exception as ex:
self.log.error("RedisCollector: failed to connect to %s:%i. %s.",
unix_socket or host, port, ex) |
def _parse_function_expression(function_specification):
"""
Parse a complex function expression like:
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
and return a composite function instance
:param function_specification:
:return: a composite function instance
"""
# NOTE FOR SECURITY
# This function has some security concerns. Security issues could arise if the user tries to read a model
# file which has been maliciously formatted to contain harmful code. In this function we close all the doors
# to a similar attack, except for those attacks which assume that the user has full access to a python environment.
# Indeed, if that is the case, then the user can already do harm to the system, and so there is no point in
# safeguard that from here. For example, the user could format a subclass of the Function class which perform
# malicious operations in the constructor, add that to the dictionary of known functions, and then interpret
# it with this code. However, if the user can instance malicious classes, then why would he use astromodels to
# carry out the attack? Instead, what we explicitly check is the content of the function_specification string,
# so that it cannot by itself do any harm (by for example containing instructions such as os.remove).
# This can be a arbitrarily complex specification, like
# ((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
# Use regular expressions to extract the set of functions like function_name{number},
# then build the set of unique functions by using the constructor set()
unique_functions = set(re.findall(r'\b([a-zA-Z0-9_]+)\{([0-9]?)\}',function_specification))
# NB: unique functions is a set like:
# {('powerlaw', '1'), ('sin', '2')}
# Create instances of the unique functions
instances = {}
# Loop over the unique functions and create instances
for (unique_function, number) in unique_functions:
complete_function_specification = "%s{%s}" % (unique_function, number)
# As first safety measure, check that the unique function is in the dictionary of _known_functions.
# This could still be easily hacked, so it won't be the only check
if unique_function in _known_functions:
# Get the function class and check that it is indeed a proper Function class
function_class = _known_functions[unique_function]
if issubclass(function_class, Function):
# Ok, let's create the instance
instance = function_class()
# Append the instance to the list
instances[complete_function_specification] = instance
else:
raise FunctionDefinitionError("The function specification %s does not contain a proper function"
% unique_function )
else:
# It might be a template
# This import is here to avoid circular dependency between this module and TemplateModel.py
import astromodels.functions.template_model
try:
instance = astromodels.functions.template_model.TemplateModel(unique_function)
except astromodels.functions.template_model.MissingDataFile:
# It's not a template
raise UnknownFunction("Function %s in expression %s is unknown. If this is a template model, you are "
"probably missing the data file" % (unique_function, function_specification))
else:
# It's a template
instances[complete_function_specification] = instance
# Check that we have found at least one instance.
if len(instances)==0:
raise DesignViolation("No known function in function specification")
# The following presents a slight security problem if the model file that has been parsed comes from an untrusted
# source. Indeed, the use of eval could make possible to execute things like os.remove.
# In order to avoid this, first we substitute the function instances with numbers and remove the operators like
# +,-,/ and so on. Then we try to execute the string with ast.literal_eval, which according to its documentation:
# Safely evaluate an expression node or a Unicode or Latin-1 encoded string containing a Python literal or
# container display. The string or node provided may only consist of the following Python literal structures:
# strings, numbers, tuples, lists, dicts, booleans, and None.This can be used for safely evaluating strings
# containing Python values from untrusted sources without the need to parse the values oneself.
# It is not capable of evaluating arbitrarily complex expressions, for example involving operators or indexing.
# If literal_eval cannot parse the string, it means that it contains unsafe input.
# Create a copy of the function_specification
string_for_literal_eval = function_specification
# Remove from the function_specification all the known operators and function_expressions, and substitute them
# with a 0 and a space
# Let's start from the function expression
for function_expression in instances.keys():
string_for_literal_eval = string_for_literal_eval.replace(function_expression, '0 ')
# Now remove all the known operators
for operator in _operations.keys():
string_for_literal_eval = string_for_literal_eval.replace(operator,'0 ')
# The string at this point should contains only numbers and parenthesis separated by one or more spaces
if re.match('''([a-zA-Z]+)''', string_for_literal_eval):
raise DesignViolation("Extraneous input in function specification")
# By using split() we separate all the numbers and parenthesis in a list, then we join them
# with a comma, to end up with a comma-separated list of parenthesis and numbers like:
# ((((0,0,(0,0,3)),0,(0,0,25)),0,(0,0,16)),0,(0,0,0,3.0))
# This string can be parsed by literal_eval as a tuple containing other tuples, which is fine.
# If the user has inserted some malicious content, like os.remove or more weird stuff like code objects,
# the parsing will fail
string_for_literal_eval = ",".join(string_for_literal_eval.split())
#print(string_for_literal_eval)
# At this point the string should be just a comma separated list of numbers
# Now try to execute the string
try:
ast.literal_eval(string_for_literal_eval)
except (ValueError, SyntaxError):
raise DesignViolation("The given expression is not a valid function expression")
else:
# The expression is safe, let's eval it
# First substitute the reference to the functions (like 'powerlaw{1}') with a string
# corresponding to the instance dictionary
sanitized_function_specification = function_specification
for function_expression in instances.keys():
sanitized_function_specification = sanitized_function_specification.replace(function_expression,
'instances["%s"]' %
function_expression)
# Now eval it. For safety measure, I remove all globals, and the only local is the 'instances' dictionary
composite_function = eval(sanitized_function_specification, {}, {'instances': instances})
return composite_function | Parse a complex function expression like:
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
and return a composite function instance
:param function_specification:
:return: a composite function instance | Below is the the instruction that describes the task:
### Input:
Parse a complex function expression like:
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
and return a composite function instance
:param function_specification:
:return: a composite function instance
### Response:
def _parse_function_expression(function_specification):
"""
Parse a complex function expression like:
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
and return a composite function instance
:param function_specification:
:return: a composite function instance
"""
# NOTE FOR SECURITY
# This function has some security concerns. Security issues could arise if the user tries to read a model
# file which has been maliciously formatted to contain harmful code. In this function we close all the doors
# to a similar attack, except for those attacks which assume that the user has full access to a python environment.
# Indeed, if that is the case, then the user can already do harm to the system, and so there is no point in
# safeguard that from here. For example, the user could format a subclass of the Function class which perform
# malicious operations in the constructor, add that to the dictionary of known functions, and then interpret
# it with this code. However, if the user can instance malicious classes, then why would he use astromodels to
# carry out the attack? Instead, what we explicitly check is the content of the function_specification string,
# so that it cannot by itself do any harm (by for example containing instructions such as os.remove).
# This can be a arbitrarily complex specification, like
# ((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
# Use regular expressions to extract the set of functions like function_name{number},
# then build the set of unique functions by using the constructor set()
unique_functions = set(re.findall(r'\b([a-zA-Z0-9_]+)\{([0-9]?)\}',function_specification))
# NB: unique functions is a set like:
# {('powerlaw', '1'), ('sin', '2')}
# Create instances of the unique functions
instances = {}
# Loop over the unique functions and create instances
for (unique_function, number) in unique_functions:
complete_function_specification = "%s{%s}" % (unique_function, number)
# As first safety measure, check that the unique function is in the dictionary of _known_functions.
# This could still be easily hacked, so it won't be the only check
if unique_function in _known_functions:
# Get the function class and check that it is indeed a proper Function class
function_class = _known_functions[unique_function]
if issubclass(function_class, Function):
# Ok, let's create the instance
instance = function_class()
# Append the instance to the list
instances[complete_function_specification] = instance
else:
raise FunctionDefinitionError("The function specification %s does not contain a proper function"
% unique_function )
else:
# It might be a template
# This import is here to avoid circular dependency between this module and TemplateModel.py
import astromodels.functions.template_model
try:
instance = astromodels.functions.template_model.TemplateModel(unique_function)
except astromodels.functions.template_model.MissingDataFile:
# It's not a template
raise UnknownFunction("Function %s in expression %s is unknown. If this is a template model, you are "
"probably missing the data file" % (unique_function, function_specification))
else:
# It's a template
instances[complete_function_specification] = instance
# Check that we have found at least one instance.
if len(instances)==0:
raise DesignViolation("No known function in function specification")
# The following presents a slight security problem if the model file that has been parsed comes from an untrusted
# source. Indeed, the use of eval could make possible to execute things like os.remove.
# In order to avoid this, first we substitute the function instances with numbers and remove the operators like
# +,-,/ and so on. Then we try to execute the string with ast.literal_eval, which according to its documentation:
# Safely evaluate an expression node or a Unicode or Latin-1 encoded string containing a Python literal or
# container display. The string or node provided may only consist of the following Python literal structures:
# strings, numbers, tuples, lists, dicts, booleans, and None.This can be used for safely evaluating strings
# containing Python values from untrusted sources without the need to parse the values oneself.
# It is not capable of evaluating arbitrarily complex expressions, for example involving operators or indexing.
# If literal_eval cannot parse the string, it means that it contains unsafe input.
# Create a copy of the function_specification
string_for_literal_eval = function_specification
# Remove from the function_specification all the known operators and function_expressions, and substitute them
# with a 0 and a space
# Let's start from the function expression
for function_expression in instances.keys():
string_for_literal_eval = string_for_literal_eval.replace(function_expression, '0 ')
# Now remove all the known operators
for operator in _operations.keys():
string_for_literal_eval = string_for_literal_eval.replace(operator,'0 ')
# The string at this point should contains only numbers and parenthesis separated by one or more spaces
if re.match('''([a-zA-Z]+)''', string_for_literal_eval):
raise DesignViolation("Extraneous input in function specification")
# By using split() we separate all the numbers and parenthesis in a list, then we join them
# with a comma, to end up with a comma-separated list of parenthesis and numbers like:
# ((((0,0,(0,0,3)),0,(0,0,25)),0,(0,0,16)),0,(0,0,0,3.0))
# This string can be parsed by literal_eval as a tuple containing other tuples, which is fine.
# If the user has inserted some malicious content, like os.remove or more weird stuff like code objects,
# the parsing will fail
string_for_literal_eval = ",".join(string_for_literal_eval.split())
#print(string_for_literal_eval)
# At this point the string should be just a comma separated list of numbers
# Now try to execute the string
try:
ast.literal_eval(string_for_literal_eval)
except (ValueError, SyntaxError):
raise DesignViolation("The given expression is not a valid function expression")
else:
# The expression is safe, let's eval it
# First substitute the reference to the functions (like 'powerlaw{1}') with a string
# corresponding to the instance dictionary
sanitized_function_specification = function_specification
for function_expression in instances.keys():
sanitized_function_specification = sanitized_function_specification.replace(function_expression,
'instances["%s"]' %
function_expression)
# Now eval it. For safety measure, I remove all globals, and the only local is the 'instances' dictionary
composite_function = eval(sanitized_function_specification, {}, {'instances': instances})
return composite_function |
def manage_fits(list_of_frame):
"""Manage a list of FITS resources"""
import astropy.io.fits as fits
import numina.types.dataframe as df
refs = []
for frame in list_of_frame:
if isinstance(frame, str):
ref = fits.open(frame)
refs.append(ref)
elif isinstance(frame, fits.HDUList):
refs.append(frame)
elif isinstance(frame, df.DataFrame):
ref = frame.open()
refs.append(ref)
else:
refs.append(frame)
try:
yield refs
finally:
# release
for obj in refs:
obj.close() | Manage a list of FITS resources | Below is the the instruction that describes the task:
### Input:
Manage a list of FITS resources
### Response:
def manage_fits(list_of_frame):
"""Manage a list of FITS resources"""
import astropy.io.fits as fits
import numina.types.dataframe as df
refs = []
for frame in list_of_frame:
if isinstance(frame, str):
ref = fits.open(frame)
refs.append(ref)
elif isinstance(frame, fits.HDUList):
refs.append(frame)
elif isinstance(frame, df.DataFrame):
ref = frame.open()
refs.append(ref)
else:
refs.append(frame)
try:
yield refs
finally:
# release
for obj in refs:
obj.close() |
def scroll_down(lines=1, file=sys.stdout):
""" Scroll the whole page down a number of lines, new lines are added to
the top.
Esc[<lines>T
"""
scroll.down(lines).write(file=file) | Scroll the whole page down a number of lines, new lines are added to
the top.
Esc[<lines>T | Below is the the instruction that describes the task:
### Input:
Scroll the whole page down a number of lines, new lines are added to
the top.
Esc[<lines>T
### Response:
def scroll_down(lines=1, file=sys.stdout):
""" Scroll the whole page down a number of lines, new lines are added to
the top.
Esc[<lines>T
"""
scroll.down(lines).write(file=file) |
def publish_table_notification(table_key, message, message_types, subject=None):
""" Publish a notification for a specific table
:type table_key: str
:param table_key: Table configuration option key name
:type message: str
:param message: Message to send via SNS
:type message_types: list
:param message_types:
List with types:
- scale-up
- scale-down
- high-throughput-alarm
- low-throughput-alarm
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
"""
topic = get_table_option(table_key, 'sns_topic_arn')
if not topic:
return
for message_type in message_types:
if message_type in get_table_option(table_key, 'sns_message_types'):
__publish(topic, message, subject)
return | Publish a notification for a specific table
:type table_key: str
:param table_key: Table configuration option key name
:type message: str
:param message: Message to send via SNS
:type message_types: list
:param message_types:
List with types:
- scale-up
- scale-down
- high-throughput-alarm
- low-throughput-alarm
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None | Below is the the instruction that describes the task:
### Input:
Publish a notification for a specific table
:type table_key: str
:param table_key: Table configuration option key name
:type message: str
:param message: Message to send via SNS
:type message_types: list
:param message_types:
List with types:
- scale-up
- scale-down
- high-throughput-alarm
- low-throughput-alarm
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
### Response:
def publish_table_notification(table_key, message, message_types, subject=None):
""" Publish a notification for a specific table
:type table_key: str
:param table_key: Table configuration option key name
:type message: str
:param message: Message to send via SNS
:type message_types: list
:param message_types:
List with types:
- scale-up
- scale-down
- high-throughput-alarm
- low-throughput-alarm
:type subject: str
:param subject: Subject to use for e-mail notifications
:returns: None
"""
topic = get_table_option(table_key, 'sns_topic_arn')
if not topic:
return
for message_type in message_types:
if message_type in get_table_option(table_key, 'sns_message_types'):
__publish(topic, message, subject)
return |
def get_cookies_for_class(session, class_name,
cookies_file=None,
username=None,
password=None):
"""
Get the cookies for the given class.
We do not validate the cookies if they are loaded from a cookies file
because this is intended for debugging purposes or if the coursera
authentication process has changed.
"""
if cookies_file:
cookies = find_cookies_for_class(cookies_file, class_name)
session.cookies.update(cookies)
logging.info('Loaded cookies from %s', cookies_file)
else:
cookies = get_cookies_from_cache(username)
session.cookies.update(cookies)
if validate_cookies(session, class_name):
logging.info('Already authenticated.')
else:
get_authentication_cookies(session, class_name, username, password)
write_cookies_to_cache(session.cookies, username) | Get the cookies for the given class.
We do not validate the cookies if they are loaded from a cookies file
because this is intended for debugging purposes or if the coursera
authentication process has changed. | Below is the the instruction that describes the task:
### Input:
Get the cookies for the given class.
We do not validate the cookies if they are loaded from a cookies file
because this is intended for debugging purposes or if the coursera
authentication process has changed.
### Response:
def get_cookies_for_class(session, class_name,
cookies_file=None,
username=None,
password=None):
"""
Get the cookies for the given class.
We do not validate the cookies if they are loaded from a cookies file
because this is intended for debugging purposes or if the coursera
authentication process has changed.
"""
if cookies_file:
cookies = find_cookies_for_class(cookies_file, class_name)
session.cookies.update(cookies)
logging.info('Loaded cookies from %s', cookies_file)
else:
cookies = get_cookies_from_cache(username)
session.cookies.update(cookies)
if validate_cookies(session, class_name):
logging.info('Already authenticated.')
else:
get_authentication_cookies(session, class_name, username, password)
write_cookies_to_cache(session.cookies, username) |
def bss_eval_images_framewise(reference_sources, estimated_sources,
window=30 * 44100, hop=15 * 44100,
compute_permutation=False):
"""
BSS Eval v3 bss_eval_images_framewise
Framewise computation of bss_eval_images.
Wrapper to ``bss_eval`` with the right parameters.
"""
return bss_eval(
reference_sources, estimated_sources,
window=window, hop=hop,
compute_permutation=compute_permutation, filters_len=512,
framewise_filters=True,
bsseval_sources_version=False
) | BSS Eval v3 bss_eval_images_framewise
Framewise computation of bss_eval_images.
Wrapper to ``bss_eval`` with the right parameters. | Below is the the instruction that describes the task:
### Input:
BSS Eval v3 bss_eval_images_framewise
Framewise computation of bss_eval_images.
Wrapper to ``bss_eval`` with the right parameters.
### Response:
def bss_eval_images_framewise(reference_sources, estimated_sources,
window=30 * 44100, hop=15 * 44100,
compute_permutation=False):
"""
BSS Eval v3 bss_eval_images_framewise
Framewise computation of bss_eval_images.
Wrapper to ``bss_eval`` with the right parameters.
"""
return bss_eval(
reference_sources, estimated_sources,
window=window, hop=hop,
compute_permutation=compute_permutation, filters_len=512,
framewise_filters=True,
bsseval_sources_version=False
) |
def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"):
"""Return value of this parameter as a string (or in native format
if native is non-zero.)"""
if field: return self._getField(field,native=native,prompt=prompt)
# may prompt for value if prompt flag is set
#XXX should change _optionalPrompt so we prompt for each element of
#XXX the array separately? I think array parameters are
#XXX not useful as non-hidden params.
if prompt: self._optionalPrompt(mode)
if index is not None:
sumindex = self._sumindex(index)
try:
if native:
return self.value[sumindex]
else:
return self.toString(self.value[sumindex])
except IndexError:
# should never happen
raise SyntaxError("Illegal index [" + repr(sumindex) +
"] for array parameter " + self.name)
elif native:
# return object itself for an array because it is
# indexable, can have values assigned, etc.
return self
else:
# return blank-separated string of values for array
return str(self) | Return value of this parameter as a string (or in native format
if native is non-zero.) | Below is the the instruction that describes the task:
### Input:
Return value of this parameter as a string (or in native format
if native is non-zero.)
### Response:
def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"):
"""Return value of this parameter as a string (or in native format
if native is non-zero.)"""
if field: return self._getField(field,native=native,prompt=prompt)
# may prompt for value if prompt flag is set
#XXX should change _optionalPrompt so we prompt for each element of
#XXX the array separately? I think array parameters are
#XXX not useful as non-hidden params.
if prompt: self._optionalPrompt(mode)
if index is not None:
sumindex = self._sumindex(index)
try:
if native:
return self.value[sumindex]
else:
return self.toString(self.value[sumindex])
except IndexError:
# should never happen
raise SyntaxError("Illegal index [" + repr(sumindex) +
"] for array parameter " + self.name)
elif native:
# return object itself for an array because it is
# indexable, can have values assigned, etc.
return self
else:
# return blank-separated string of values for array
return str(self) |
def post_request(self, request, response):
# type: (BaseHttpRequest, HttpResponse) -> HttpResponse
"""
Post-request hook to allow CORS headers to responses.
"""
if request.method != api.Method.OPTIONS:
response.headers.update(self.request_headers(request))
return response | Post-request hook to allow CORS headers to responses. | Below is the the instruction that describes the task:
### Input:
Post-request hook to allow CORS headers to responses.
### Response:
def post_request(self, request, response):
# type: (BaseHttpRequest, HttpResponse) -> HttpResponse
"""
Post-request hook to allow CORS headers to responses.
"""
if request.method != api.Method.OPTIONS:
response.headers.update(self.request_headers(request))
return response |
def generate_span_requests(self, span_datas):
"""Span request generator.
:type span_datas: list of
:class:`~opencensus.trace.span_data.SpanData`
:param span_datas: SpanData tuples to convert to protobuf spans
and send to opensensusd agent
:rtype: list of
`~gen.opencensus.agent.trace.v1.trace_service_pb2.ExportTraceServiceRequest`
:returns: List of span export requests.
"""
pb_spans = [
utils.translate_to_trace_proto(span_data)
for span_data in span_datas
]
# TODO: send node once per channel
yield trace_service_pb2.ExportTraceServiceRequest(
node=self.node,
spans=pb_spans) | Span request generator.
:type span_datas: list of
:class:`~opencensus.trace.span_data.SpanData`
:param span_datas: SpanData tuples to convert to protobuf spans
and send to opensensusd agent
:rtype: list of
`~gen.opencensus.agent.trace.v1.trace_service_pb2.ExportTraceServiceRequest`
:returns: List of span export requests. | Below is the the instruction that describes the task:
### Input:
Span request generator.
:type span_datas: list of
:class:`~opencensus.trace.span_data.SpanData`
:param span_datas: SpanData tuples to convert to protobuf spans
and send to opensensusd agent
:rtype: list of
`~gen.opencensus.agent.trace.v1.trace_service_pb2.ExportTraceServiceRequest`
:returns: List of span export requests.
### Response:
def generate_span_requests(self, span_datas):
"""Span request generator.
:type span_datas: list of
:class:`~opencensus.trace.span_data.SpanData`
:param span_datas: SpanData tuples to convert to protobuf spans
and send to opensensusd agent
:rtype: list of
`~gen.opencensus.agent.trace.v1.trace_service_pb2.ExportTraceServiceRequest`
:returns: List of span export requests.
"""
pb_spans = [
utils.translate_to_trace_proto(span_data)
for span_data in span_datas
]
# TODO: send node once per channel
yield trace_service_pb2.ExportTraceServiceRequest(
node=self.node,
spans=pb_spans) |
def get_metrics(awsclient, name):
"""Print out cloudformation metrics for a lambda function.
:param awsclient
:param name: name of the lambda function
:return: exit_code
"""
metrics = ['Duration', 'Errors', 'Invocations', 'Throttles']
client_cw = awsclient.get_client('cloudwatch')
for metric in metrics:
response = client_cw.get_metric_statistics(
Namespace='AWS/Lambda',
MetricName=metric,
Dimensions=[
{
'Name': 'FunctionName',
'Value': name
},
],
# StartTime=datetime.now() + timedelta(days=-1),
# EndTime=datetime.now(),
StartTime=maya.now().subtract(days=1).datetime(),
EndTime=maya.now().datetime(),
Period=3600,
Statistics=[
'Sum',
],
Unit=unit(metric)
)
log.info('\t%s %s' % (metric,
repr(aggregate_datapoints(response['Datapoints']))))
return 0 | Print out cloudformation metrics for a lambda function.
:param awsclient
:param name: name of the lambda function
:return: exit_code | Below is the the instruction that describes the task:
### Input:
Print out cloudformation metrics for a lambda function.
:param awsclient
:param name: name of the lambda function
:return: exit_code
### Response:
def get_metrics(awsclient, name):
"""Print out cloudformation metrics for a lambda function.
:param awsclient
:param name: name of the lambda function
:return: exit_code
"""
metrics = ['Duration', 'Errors', 'Invocations', 'Throttles']
client_cw = awsclient.get_client('cloudwatch')
for metric in metrics:
response = client_cw.get_metric_statistics(
Namespace='AWS/Lambda',
MetricName=metric,
Dimensions=[
{
'Name': 'FunctionName',
'Value': name
},
],
# StartTime=datetime.now() + timedelta(days=-1),
# EndTime=datetime.now(),
StartTime=maya.now().subtract(days=1).datetime(),
EndTime=maya.now().datetime(),
Period=3600,
Statistics=[
'Sum',
],
Unit=unit(metric)
)
log.info('\t%s %s' % (metric,
repr(aggregate_datapoints(response['Datapoints']))))
return 0 |
def _sample(self, position, trajectory_length, stepsize, lsteps=None):
"""
Runs a single sampling iteration to return a sample
"""
# Resampling momentum
momentum = np.reshape(np.random.normal(0, 1, len(position)), position.shape)
# position_m here will be the previous sampled value of position
position_bar, momentum_bar = position.copy(), momentum
# Number of steps L to simulate dynamics
if lsteps is None:
lsteps = int(max(1, round(trajectory_length / stepsize, 0)))
grad_bar, _ = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
for _ in range(lsteps):
position_bar, momentum_bar, grad_bar =\
self.simulate_dynamics(self.model, position_bar, momentum_bar,
stepsize, self.grad_log_pdf, grad_bar).get_proposed_values()
acceptance_prob = self._acceptance_prob(position, position_bar, momentum, momentum_bar)
# Metropolis acceptance probability
alpha = min(1, acceptance_prob)
# Accept or reject the new proposed value of position, i.e position_bar
if np.random.rand() < alpha:
position = position_bar.copy()
self.accepted_proposals += 1.0
return position, alpha | Runs a single sampling iteration to return a sample | Below is the the instruction that describes the task:
### Input:
Runs a single sampling iteration to return a sample
### Response:
def _sample(self, position, trajectory_length, stepsize, lsteps=None):
"""
Runs a single sampling iteration to return a sample
"""
# Resampling momentum
momentum = np.reshape(np.random.normal(0, 1, len(position)), position.shape)
# position_m here will be the previous sampled value of position
position_bar, momentum_bar = position.copy(), momentum
# Number of steps L to simulate dynamics
if lsteps is None:
lsteps = int(max(1, round(trajectory_length / stepsize, 0)))
grad_bar, _ = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
for _ in range(lsteps):
position_bar, momentum_bar, grad_bar =\
self.simulate_dynamics(self.model, position_bar, momentum_bar,
stepsize, self.grad_log_pdf, grad_bar).get_proposed_values()
acceptance_prob = self._acceptance_prob(position, position_bar, momentum, momentum_bar)
# Metropolis acceptance probability
alpha = min(1, acceptance_prob)
# Accept or reject the new proposed value of position, i.e position_bar
if np.random.rand() < alpha:
position = position_bar.copy()
self.accepted_proposals += 1.0
return position, alpha |
def python_2_nonzero_compatible(klass):
"""
Adds a `__nonzero__()` method to classes that define a `__bool__()` method,
so boolean conversion works in Python 2. Has no effect in Python 3.
:param klass: The class to modify. Must define `__bool__()`.
:return: The possibly patched class.
"""
if six.PY2:
if '__bool__' not in klass.__dict__:
raise ValueError(
'@python_2_nonzero_compatible cannot be applied to {0} because '
'it doesn\'t define __bool__().'.format(klass.__name__))
klass.__nonzero__ = klass.__bool__
return klass | Adds a `__nonzero__()` method to classes that define a `__bool__()` method,
so boolean conversion works in Python 2. Has no effect in Python 3.
:param klass: The class to modify. Must define `__bool__()`.
:return: The possibly patched class. | Below is the the instruction that describes the task:
### Input:
Adds a `__nonzero__()` method to classes that define a `__bool__()` method,
so boolean conversion works in Python 2. Has no effect in Python 3.
:param klass: The class to modify. Must define `__bool__()`.
:return: The possibly patched class.
### Response:
def python_2_nonzero_compatible(klass):
"""
Adds a `__nonzero__()` method to classes that define a `__bool__()` method,
so boolean conversion works in Python 2. Has no effect in Python 3.
:param klass: The class to modify. Must define `__bool__()`.
:return: The possibly patched class.
"""
if six.PY2:
if '__bool__' not in klass.__dict__:
raise ValueError(
'@python_2_nonzero_compatible cannot be applied to {0} because '
'it doesn\'t define __bool__().'.format(klass.__name__))
klass.__nonzero__ = klass.__bool__
return klass |
def simplex_determine_leaving_arc(self, t, k, l):
'''
API:
simplex_determine_leaving_arc(self, t, k, l)
Description:
Determines and returns the leaving arc.
Input:
t: current spanning tree solution.
k: tail of the entering arc.
l: head of the entering arc.
Return:
Returns the tuple that represents leaving arc, capacity of the
cycle and cycle.
'''
# k,l are the first two elements of the cycle
cycle = self.simplex_identify_cycle(t, k, l)
flow_kl = self.get_edge_attr(k, l, 'flow')
capacity_kl = self.get_edge_attr(k, l, 'capacity')
min_capacity = capacity_kl
# check if k,l is in U or L
if flow_kl==capacity_kl:
# l,k will be the last two elements
cycle.reverse()
n = len(cycle)
index = 0
# determine last blocking arc
t.add_edge(k, l)
tel = t.get_edge_list()
while index < (n-1):
if (cycle[index], cycle[index+1]) in tel:
flow = self.edge_attr[(cycle[index], cycle[index+1])]['flow']
capacity = \
self.edge_attr[(cycle[index],cycle[index+1])]['capacity']
if min_capacity >= (capacity-flow):
candidate = (cycle[index], cycle[index+1])
min_capacity = capacity-flow
else:
flow = self.edge_attr[(cycle[index+1], cycle[index])]['flow']
if min_capacity >= flow:
candidate = (cycle[index+1], cycle[index])
min_capacity = flow
index += 1
# check arc (cycle[n-1], cycle[0])
if (cycle[n-1], cycle[0]) in tel:
flow = self.edge_attr[(cycle[n-1], cycle[0])]['flow']
capacity = self.edge_attr[(cycle[n-1], cycle[0])]['capacity']
if min_capacity >= (capacity-flow):
candidate = (cycle[n-1], cycle[0])
min_capacity = capacity-flow
else:
flow = self.edge_attr[(cycle[0], cycle[n-1])]['flow']
if min_capacity >= flow:
candidate = (cycle[0], cycle[n-1])
min_capacity = flow
return (candidate, min_capacity, cycle) | API:
simplex_determine_leaving_arc(self, t, k, l)
Description:
Determines and returns the leaving arc.
Input:
t: current spanning tree solution.
k: tail of the entering arc.
l: head of the entering arc.
Return:
Returns the tuple that represents leaving arc, capacity of the
cycle and cycle. | Below is the the instruction that describes the task:
### Input:
API:
simplex_determine_leaving_arc(self, t, k, l)
Description:
Determines and returns the leaving arc.
Input:
t: current spanning tree solution.
k: tail of the entering arc.
l: head of the entering arc.
Return:
Returns the tuple that represents leaving arc, capacity of the
cycle and cycle.
### Response:
def simplex_determine_leaving_arc(self, t, k, l):
'''
API:
simplex_determine_leaving_arc(self, t, k, l)
Description:
Determines and returns the leaving arc.
Input:
t: current spanning tree solution.
k: tail of the entering arc.
l: head of the entering arc.
Return:
Returns the tuple that represents leaving arc, capacity of the
cycle and cycle.
'''
# k,l are the first two elements of the cycle
cycle = self.simplex_identify_cycle(t, k, l)
flow_kl = self.get_edge_attr(k, l, 'flow')
capacity_kl = self.get_edge_attr(k, l, 'capacity')
min_capacity = capacity_kl
# check if k,l is in U or L
if flow_kl==capacity_kl:
# l,k will be the last two elements
cycle.reverse()
n = len(cycle)
index = 0
# determine last blocking arc
t.add_edge(k, l)
tel = t.get_edge_list()
while index < (n-1):
if (cycle[index], cycle[index+1]) in tel:
flow = self.edge_attr[(cycle[index], cycle[index+1])]['flow']
capacity = \
self.edge_attr[(cycle[index],cycle[index+1])]['capacity']
if min_capacity >= (capacity-flow):
candidate = (cycle[index], cycle[index+1])
min_capacity = capacity-flow
else:
flow = self.edge_attr[(cycle[index+1], cycle[index])]['flow']
if min_capacity >= flow:
candidate = (cycle[index+1], cycle[index])
min_capacity = flow
index += 1
# check arc (cycle[n-1], cycle[0])
if (cycle[n-1], cycle[0]) in tel:
flow = self.edge_attr[(cycle[n-1], cycle[0])]['flow']
capacity = self.edge_attr[(cycle[n-1], cycle[0])]['capacity']
if min_capacity >= (capacity-flow):
candidate = (cycle[n-1], cycle[0])
min_capacity = capacity-flow
else:
flow = self.edge_attr[(cycle[0], cycle[n-1])]['flow']
if min_capacity >= flow:
candidate = (cycle[0], cycle[n-1])
min_capacity = flow
return (candidate, min_capacity, cycle) |
def read(cls, root_tex_path):
"""Construct an `LsstLatexDoc` instance by reading and parsing the
LaTeX source.
Parameters
----------
root_tex_path : `str`
Path to the LaTeX source on the filesystem. For multi-file LaTeX
projects this should be the path to the root document.
Notes
-----
This method implements the following pipeline:
1. `lsstprojectmeta.tex.normalizer.read_tex_file`
2. `lsstprojectmeta.tex.scraper.get_macros`
3. `lsstprojectmeta.tex.normalizer.replace_macros`
Thus ``input`` and ``includes`` are resolved along with simple macros.
"""
# Read and normalize the TeX source, replacing macros with content
root_dir = os.path.dirname(root_tex_path)
tex_source = read_tex_file(root_tex_path)
tex_macros = get_macros(tex_source)
tex_source = replace_macros(tex_source, tex_macros)
return cls(tex_source, root_dir=root_dir) | Construct an `LsstLatexDoc` instance by reading and parsing the
LaTeX source.
Parameters
----------
root_tex_path : `str`
Path to the LaTeX source on the filesystem. For multi-file LaTeX
projects this should be the path to the root document.
Notes
-----
This method implements the following pipeline:
1. `lsstprojectmeta.tex.normalizer.read_tex_file`
2. `lsstprojectmeta.tex.scraper.get_macros`
3. `lsstprojectmeta.tex.normalizer.replace_macros`
Thus ``input`` and ``includes`` are resolved along with simple macros. | Below is the the instruction that describes the task:
### Input:
Construct an `LsstLatexDoc` instance by reading and parsing the
LaTeX source.
Parameters
----------
root_tex_path : `str`
Path to the LaTeX source on the filesystem. For multi-file LaTeX
projects this should be the path to the root document.
Notes
-----
This method implements the following pipeline:
1. `lsstprojectmeta.tex.normalizer.read_tex_file`
2. `lsstprojectmeta.tex.scraper.get_macros`
3. `lsstprojectmeta.tex.normalizer.replace_macros`
Thus ``input`` and ``includes`` are resolved along with simple macros.
### Response:
def read(cls, root_tex_path):
"""Construct an `LsstLatexDoc` instance by reading and parsing the
LaTeX source.
Parameters
----------
root_tex_path : `str`
Path to the LaTeX source on the filesystem. For multi-file LaTeX
projects this should be the path to the root document.
Notes
-----
This method implements the following pipeline:
1. `lsstprojectmeta.tex.normalizer.read_tex_file`
2. `lsstprojectmeta.tex.scraper.get_macros`
3. `lsstprojectmeta.tex.normalizer.replace_macros`
Thus ``input`` and ``includes`` are resolved along with simple macros.
"""
# Read and normalize the TeX source, replacing macros with content
root_dir = os.path.dirname(root_tex_path)
tex_source = read_tex_file(root_tex_path)
tex_macros = get_macros(tex_source)
tex_source = replace_macros(tex_source, tex_macros)
return cls(tex_source, root_dir=root_dir) |
def _graph_add_edge(self, cfg_node, src_node, src_jumpkind, src_ins_addr, src_stmt_idx):
"""
Add edge between nodes, or add node if entry point
:param CFGNode cfg_node: node which is jumped to
:param CFGNode src_node: node which is jumped from none if entry point
:param str src_jumpkind: what type of jump the edge takes
:param int or str src_stmt_idx: source statements ID
:return: None
"""
if src_node is None:
self.graph.add_node(cfg_node)
else:
self.graph.add_edge(src_node, cfg_node, jumpkind=src_jumpkind, ins_addr=src_ins_addr,
stmt_idx=src_stmt_idx) | Add edge between nodes, or add node if entry point
:param CFGNode cfg_node: node which is jumped to
:param CFGNode src_node: node which is jumped from none if entry point
:param str src_jumpkind: what type of jump the edge takes
:param int or str src_stmt_idx: source statements ID
:return: None | Below is the the instruction that describes the task:
### Input:
Add edge between nodes, or add node if entry point
:param CFGNode cfg_node: node which is jumped to
:param CFGNode src_node: node which is jumped from none if entry point
:param str src_jumpkind: what type of jump the edge takes
:param int or str src_stmt_idx: source statements ID
:return: None
### Response:
def _graph_add_edge(self, cfg_node, src_node, src_jumpkind, src_ins_addr, src_stmt_idx):
"""
Add edge between nodes, or add node if entry point
:param CFGNode cfg_node: node which is jumped to
:param CFGNode src_node: node which is jumped from none if entry point
:param str src_jumpkind: what type of jump the edge takes
:param int or str src_stmt_idx: source statements ID
:return: None
"""
if src_node is None:
self.graph.add_node(cfg_node)
else:
self.graph.add_edge(src_node, cfg_node, jumpkind=src_jumpkind, ins_addr=src_ins_addr,
stmt_idx=src_stmt_idx) |
def update(self, forecasts, observations):
"""
Update the statistics with forecasts and observations.
Args:
forecasts: The discrete Cumulative Distribution Functions of
observations:
"""
if len(observations.shape) == 1:
obs_cdfs = np.zeros((observations.size, self.thresholds.size))
for o, observation in enumerate(observations):
obs_cdfs[o, self.thresholds >= observation] = 1
else:
obs_cdfs = observations
self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
self.errors["O"] += np.sum(obs_cdfs, axis=0)
self.num_forecasts += forecasts.shape[0] | Update the statistics with forecasts and observations.
Args:
forecasts: The discrete Cumulative Distribution Functions of
observations: | Below is the the instruction that describes the task:
### Input:
Update the statistics with forecasts and observations.
Args:
forecasts: The discrete Cumulative Distribution Functions of
observations:
### Response:
def update(self, forecasts, observations):
"""
Update the statistics with forecasts and observations.
Args:
forecasts: The discrete Cumulative Distribution Functions of
observations:
"""
if len(observations.shape) == 1:
obs_cdfs = np.zeros((observations.size, self.thresholds.size))
for o, observation in enumerate(observations):
obs_cdfs[o, self.thresholds >= observation] = 1
else:
obs_cdfs = observations
self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
self.errors["O"] += np.sum(obs_cdfs, axis=0)
self.num_forecasts += forecasts.shape[0] |
def register_module(self, module, **options):
"""Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor for the blueprint
system.
"""
assert blueprint_is_module(module), 'register_module requires ' \
'actual module objects. Please upgrade to blueprints though.'
if not self.enable_modules:
raise RuntimeError('Module support was disabled but code '
'attempted to register a module named %r' % module)
else:
from warnings import warn
warn(DeprecationWarning('Modules are deprecated. Upgrade to '
'using blueprints. Have a look into the documentation for '
'more information. If this module was registered by a '
'Flask-Extension upgrade the extension or contact the author '
'of that extension instead. (Registered %r)' % module),
stacklevel=2)
self.register_blueprint(module, **options) | Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor for the blueprint
system. | Below is the the instruction that describes the task:
### Input:
Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor for the blueprint
system.
### Response:
def register_module(self, module, **options):
"""Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor for the blueprint
system.
"""
assert blueprint_is_module(module), 'register_module requires ' \
'actual module objects. Please upgrade to blueprints though.'
if not self.enable_modules:
raise RuntimeError('Module support was disabled but code '
'attempted to register a module named %r' % module)
else:
from warnings import warn
warn(DeprecationWarning('Modules are deprecated. Upgrade to '
'using blueprints. Have a look into the documentation for '
'more information. If this module was registered by a '
'Flask-Extension upgrade the extension or contact the author '
'of that extension instead. (Registered %r)' % module),
stacklevel=2)
self.register_blueprint(module, **options) |
def update_meta_data(meta=None):
"""
Modify the metadata dictionary.
DATE, PROGRAM, and PROGVER are added/modified.
Parameters
----------
meta : dict
The dictionary to be modified, default = None (empty)
Returns
-------
An updated dictionary.
"""
if meta is None:
meta = {}
if 'DATE' not in meta:
meta['DATE'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
if 'PROGRAM' not in meta:
meta['PROGRAM'] = "AegeanTools.catalogs"
meta['PROGVER'] = "{0}-({1})".format(__version__, __date__)
return meta | Modify the metadata dictionary.
DATE, PROGRAM, and PROGVER are added/modified.
Parameters
----------
meta : dict
The dictionary to be modified, default = None (empty)
Returns
-------
An updated dictionary. | Below is the the instruction that describes the task:
### Input:
Modify the metadata dictionary.
DATE, PROGRAM, and PROGVER are added/modified.
Parameters
----------
meta : dict
The dictionary to be modified, default = None (empty)
Returns
-------
An updated dictionary.
### Response:
def update_meta_data(meta=None):
"""
Modify the metadata dictionary.
DATE, PROGRAM, and PROGVER are added/modified.
Parameters
----------
meta : dict
The dictionary to be modified, default = None (empty)
Returns
-------
An updated dictionary.
"""
if meta is None:
meta = {}
if 'DATE' not in meta:
meta['DATE'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
if 'PROGRAM' not in meta:
meta['PROGRAM'] = "AegeanTools.catalogs"
meta['PROGVER'] = "{0}-({1})".format(__version__, __date__)
return meta |
def load_rnn_checkpoint(cells, prefix, epoch):
"""Load model checkpoint from file.
Pack weights after loading.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- symbol will be loaded from ``prefix-symbol.json``.
- parameters will be loaded from ``prefix-epoch.params``.
"""
sym, arg, aux = load_checkpoint(prefix, epoch)
if isinstance(cells, BaseRNNCell):
cells = [cells]
for cell in cells:
arg = cell.pack_weights(arg)
return sym, arg, aux | Load model checkpoint from file.
Pack weights after loading.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- symbol will be loaded from ``prefix-symbol.json``.
- parameters will be loaded from ``prefix-epoch.params``. | Below is the the instruction that describes the task:
### Input:
Load model checkpoint from file.
Pack weights after loading.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- symbol will be loaded from ``prefix-symbol.json``.
- parameters will be loaded from ``prefix-epoch.params``.
### Response:
def load_rnn_checkpoint(cells, prefix, epoch):
"""Load model checkpoint from file.
Pack weights after loading.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- symbol will be loaded from ``prefix-symbol.json``.
- parameters will be loaded from ``prefix-epoch.params``.
"""
sym, arg, aux = load_checkpoint(prefix, epoch)
if isinstance(cells, BaseRNNCell):
cells = [cells]
for cell in cells:
arg = cell.pack_weights(arg)
return sym, arg, aux |
def update_device_info_list(self):
"""
Updates the device information list. You'll have to call this method
every time you added CTI files or plugged/unplugged devices.
:return: None.
"""
#
self._release_gentl_producers()
try:
self._open_gentl_producers()
self._open_systems()
#
for system in self._systems:
#
system.update_interface_info_list(self.timeout_for_update)
#
for i_info in system.interface_info_list:
iface = i_info.create_interface()
try:
iface.open()
except (
NotInitializedException, ResourceInUseException,
InvalidHandleException, InvalidHandleException,
InvalidParameterException, AccessDeniedException,
) as e:
self._logger.debug(e, exc_info=True)
else:
self._logger.info(
'Opened Interface module, {0}.'.format(iface.id_)
)
iface.update_device_info_list(self.timeout_for_update)
self._interfaces.append(iface)
for d_info in iface.device_info_list:
self.device_info_list.append(d_info)
except LoadLibraryException as e:
self._logger.error(e, exc_info=True)
self._has_revised_device_list = False
else:
self._has_revised_device_list = True
#
self._logger.info('Updated the device information list.') | Updates the device information list. You'll have to call this method
every time you added CTI files or plugged/unplugged devices.
:return: None. | Below is the the instruction that describes the task:
### Input:
Updates the device information list. You'll have to call this method
every time you added CTI files or plugged/unplugged devices.
:return: None.
### Response:
def update_device_info_list(self):
"""
Updates the device information list. You'll have to call this method
every time you added CTI files or plugged/unplugged devices.
:return: None.
"""
#
self._release_gentl_producers()
try:
self._open_gentl_producers()
self._open_systems()
#
for system in self._systems:
#
system.update_interface_info_list(self.timeout_for_update)
#
for i_info in system.interface_info_list:
iface = i_info.create_interface()
try:
iface.open()
except (
NotInitializedException, ResourceInUseException,
InvalidHandleException, InvalidHandleException,
InvalidParameterException, AccessDeniedException,
) as e:
self._logger.debug(e, exc_info=True)
else:
self._logger.info(
'Opened Interface module, {0}.'.format(iface.id_)
)
iface.update_device_info_list(self.timeout_for_update)
self._interfaces.append(iface)
for d_info in iface.device_info_list:
self.device_info_list.append(d_info)
except LoadLibraryException as e:
self._logger.error(e, exc_info=True)
self._has_revised_device_list = False
else:
self._has_revised_device_list = True
#
self._logger.info('Updated the device information list.') |
def run(self):
"""Run the App main logic.
This method should contain the core logic of the App.
"""
# read inputs
indent = int(self.tcex.playbook.read(self.args.indent))
json_data = self.tcex.playbook.read(self.args.json_data)
# get the playbook variable type
json_data_type = self.tcex.playbook.variable_type(self.args.json_data)
# convert string input to dict
if json_data_type in ['String']:
json_data = json.loads(json_data)
# generate the new "pretty" json (this will be used as an option variable)
try:
self.pretty_json = json.dumps(json_data, indent=indent, sort_keys=self.args.sort_keys)
except Exception:
self.tcex.exit(1, 'Failed parsing JSON data.')
# set the App exit message
self.exit_message = 'JSON prettified.' | Run the App main logic.
This method should contain the core logic of the App. | Below is the the instruction that describes the task:
### Input:
Run the App main logic.
This method should contain the core logic of the App.
### Response:
def run(self):
"""Run the App main logic.
This method should contain the core logic of the App.
"""
# read inputs
indent = int(self.tcex.playbook.read(self.args.indent))
json_data = self.tcex.playbook.read(self.args.json_data)
# get the playbook variable type
json_data_type = self.tcex.playbook.variable_type(self.args.json_data)
# convert string input to dict
if json_data_type in ['String']:
json_data = json.loads(json_data)
# generate the new "pretty" json (this will be used as an option variable)
try:
self.pretty_json = json.dumps(json_data, indent=indent, sort_keys=self.args.sort_keys)
except Exception:
self.tcex.exit(1, 'Failed parsing JSON data.')
# set the App exit message
self.exit_message = 'JSON prettified.' |
def create_treecolumn(self, objectlist):
"""Create a gtk.TreeViewColumn for the configuration.
"""
col = gtk.TreeViewColumn(self.title)
col.set_data('pygtkhelpers::objectlist', objectlist)
col.set_data('pygtkhelpers::column', self)
col.props.visible = self.visible
if self.expand is not None:
col.props.expand = self.expand
if self.resizable is not None:
col.props.resizable = self.resizable
if self.width is not None:
col.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
col.set_fixed_width(self.width)
for cell in self.cells:
view_cell = cell.create_renderer(self, objectlist)
view_cell.set_data('pygtkhelpers::column', self)
# XXX: better control over packing
col.pack_start(view_cell)
col.set_cell_data_func(view_cell, cell.cell_data_func)
col.set_reorderable(True)
col.set_sort_indicator(False)
col.set_sort_order(gtk.SORT_DESCENDING)
if objectlist and objectlist.sortable and self.sorted:
idx = objectlist.columns.index(self)
sort_func = self._default_sort_func
objectlist.model_sort.set_sort_func(idx, sort_func, objectlist)
col.set_sort_column_id(idx)
if objectlist and objectlist.searchable and self.searchable:
self.search_by(objectlist)
col.connect('clicked', self._on_viewcol_clicked)
return col | Create a gtk.TreeViewColumn for the configuration. | Below is the the instruction that describes the task:
### Input:
Create a gtk.TreeViewColumn for the configuration.
### Response:
def create_treecolumn(self, objectlist):
"""Create a gtk.TreeViewColumn for the configuration.
"""
col = gtk.TreeViewColumn(self.title)
col.set_data('pygtkhelpers::objectlist', objectlist)
col.set_data('pygtkhelpers::column', self)
col.props.visible = self.visible
if self.expand is not None:
col.props.expand = self.expand
if self.resizable is not None:
col.props.resizable = self.resizable
if self.width is not None:
col.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
col.set_fixed_width(self.width)
for cell in self.cells:
view_cell = cell.create_renderer(self, objectlist)
view_cell.set_data('pygtkhelpers::column', self)
# XXX: better control over packing
col.pack_start(view_cell)
col.set_cell_data_func(view_cell, cell.cell_data_func)
col.set_reorderable(True)
col.set_sort_indicator(False)
col.set_sort_order(gtk.SORT_DESCENDING)
if objectlist and objectlist.sortable and self.sorted:
idx = objectlist.columns.index(self)
sort_func = self._default_sort_func
objectlist.model_sort.set_sort_func(idx, sort_func, objectlist)
col.set_sort_column_id(idx)
if objectlist and objectlist.searchable and self.searchable:
self.search_by(objectlist)
col.connect('clicked', self._on_viewcol_clicked)
return col |
def ModuleLogger(globs):
"""Create a module level logger.
To debug a module, create a _debug variable in the module, then use the
ModuleLogger function to create a "module level" logger. When a handler
is added to this logger or a child of this logger, the _debug variable will
be incremented.
All of the calls within functions or class methods within the module should
first check to see if _debug is set to prevent calls to formatter objects
that aren't necessary.
"""
# make sure that _debug is defined
if not globs.has_key('_debug'):
raise RuntimeError("define _debug before creating a module logger")
# logger name is the module name
logger_name = globs['__name__']
# create a logger to be assigned to _log
logger = logging.getLogger(logger_name)
# put in a reference to the module globals
logger.globs = globs
# if this is a "root" logger add a default handler for warnings and up
if '.' not in logger_name:
hdlr = logging.StreamHandler()
hdlr.setLevel(logging.WARNING)
hdlr.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))
logger.addHandler(hdlr)
return logger | Create a module level logger.
To debug a module, create a _debug variable in the module, then use the
ModuleLogger function to create a "module level" logger. When a handler
is added to this logger or a child of this logger, the _debug variable will
be incremented.
All of the calls within functions or class methods within the module should
first check to see if _debug is set to prevent calls to formatter objects
that aren't necessary. | Below is the the instruction that describes the task:
### Input:
Create a module level logger.
To debug a module, create a _debug variable in the module, then use the
ModuleLogger function to create a "module level" logger. When a handler
is added to this logger or a child of this logger, the _debug variable will
be incremented.
All of the calls within functions or class methods within the module should
first check to see if _debug is set to prevent calls to formatter objects
that aren't necessary.
### Response:
def ModuleLogger(globs):
"""Create a module level logger.
To debug a module, create a _debug variable in the module, then use the
ModuleLogger function to create a "module level" logger. When a handler
is added to this logger or a child of this logger, the _debug variable will
be incremented.
All of the calls within functions or class methods within the module should
first check to see if _debug is set to prevent calls to formatter objects
that aren't necessary.
"""
# make sure that _debug is defined
if not globs.has_key('_debug'):
raise RuntimeError("define _debug before creating a module logger")
# logger name is the module name
logger_name = globs['__name__']
# create a logger to be assigned to _log
logger = logging.getLogger(logger_name)
# put in a reference to the module globals
logger.globs = globs
# if this is a "root" logger add a default handler for warnings and up
if '.' not in logger_name:
hdlr = logging.StreamHandler()
hdlr.setLevel(logging.WARNING)
hdlr.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))
logger.addHandler(hdlr)
return logger |
def create_store():
"""
A helper for setting the _proxy and slapping the store
object for us.
:return: A thread-local storage as a dictionary
"""
new_storage = _proxy('store')
_state.store = type('store', (object,), {})
new_storage.store = dict()
return new_storage.store | A helper for setting the _proxy and slapping the store
object for us.
:return: A thread-local storage as a dictionary | Below is the the instruction that describes the task:
### Input:
A helper for setting the _proxy and slapping the store
object for us.
:return: A thread-local storage as a dictionary
### Response:
def create_store():
"""
A helper for setting the _proxy and slapping the store
object for us.
:return: A thread-local storage as a dictionary
"""
new_storage = _proxy('store')
_state.store = type('store', (object,), {})
new_storage.store = dict()
return new_storage.store |
def encode(self, x):
"""
Given an input array `x` it returns its associated encoding `y(x)`, that is,
a stable configuration (local energy minimum) of the hidden units
while the visible units are clampled to `x`.
Note that NO learning takes place.
"""
E = self.energy
y_min = self.find_energy_minimum(E, x)
return y_min | Given an input array `x` it returns its associated encoding `y(x)`, that is,
a stable configuration (local energy minimum) of the hidden units
while the visible units are clampled to `x`.
Note that NO learning takes place. | Below is the the instruction that describes the task:
### Input:
Given an input array `x` it returns its associated encoding `y(x)`, that is,
a stable configuration (local energy minimum) of the hidden units
while the visible units are clampled to `x`.
Note that NO learning takes place.
### Response:
def encode(self, x):
"""
Given an input array `x` it returns its associated encoding `y(x)`, that is,
a stable configuration (local energy minimum) of the hidden units
while the visible units are clampled to `x`.
Note that NO learning takes place.
"""
E = self.energy
y_min = self.find_energy_minimum(E, x)
return y_min |
def as_child(cls, global_config, parent=None):
'''Run a single job in a child process.
This method never returns; it always calls :func:`sys.exit`
with an error code that says what it did.
'''
try:
setproctitle('rejester worker')
random.seed() # otherwise everyone inherits the same seed
yakonfig.set_default_config([yakonfig, dblogger, rejester],
config=global_config)
worker = cls(yakonfig.get_global_config(rejester.config_name))
worker.register(parent=parent)
did_work = worker.run(set_title=True)
worker.unregister()
if did_work:
sys.exit(cls.EXIT_SUCCESS)
else:
sys.exit(cls.EXIT_BORED)
except Exception, e:
# There's some off chance we have logging.
# You will be here if redis is down, for instance,
# and the yakonfig dblogger setup runs but then
# the get_work call fails with an exception.
if len(logging.root.handlers) > 0:
logger.critical('failed to do any work', exc_info=e)
sys.exit(cls.EXIT_EXCEPTION) | Run a single job in a child process.
This method never returns; it always calls :func:`sys.exit`
with an error code that says what it did. | Below is the the instruction that describes the task:
### Input:
Run a single job in a child process.
This method never returns; it always calls :func:`sys.exit`
with an error code that says what it did.
### Response:
def as_child(cls, global_config, parent=None):
'''Run a single job in a child process.
This method never returns; it always calls :func:`sys.exit`
with an error code that says what it did.
'''
try:
setproctitle('rejester worker')
random.seed() # otherwise everyone inherits the same seed
yakonfig.set_default_config([yakonfig, dblogger, rejester],
config=global_config)
worker = cls(yakonfig.get_global_config(rejester.config_name))
worker.register(parent=parent)
did_work = worker.run(set_title=True)
worker.unregister()
if did_work:
sys.exit(cls.EXIT_SUCCESS)
else:
sys.exit(cls.EXIT_BORED)
except Exception, e:
# There's some off chance we have logging.
# You will be here if redis is down, for instance,
# and the yakonfig dblogger setup runs but then
# the get_work call fails with an exception.
if len(logging.root.handlers) > 0:
logger.critical('failed to do any work', exc_info=e)
sys.exit(cls.EXIT_EXCEPTION) |
def is_bit_mask(enumeration, potential_mask):
"""
A utility function that checks if the provided value is a composite bit
mask of enumeration values in the specified enumeration class.
Args:
enumeration (class): One of the mask enumeration classes found in this
file. These include:
* Cryptographic Usage Mask
* Protection Storage Mask
* Storage Status Mask
potential_mask (int): A potential bit mask composed of enumeration
values belonging to the enumeration class.
Returns:
True: if the potential mask is a valid bit mask of the mask enumeration
False: otherwise
"""
if not isinstance(potential_mask, six.integer_types):
return False
mask_enumerations = (
CryptographicUsageMask,
ProtectionStorageMask,
StorageStatusMask
)
if enumeration not in mask_enumerations:
return False
mask = 0
for value in [e.value for e in enumeration]:
if (value & potential_mask) == value:
mask |= value
if mask != potential_mask:
return False
return True | A utility function that checks if the provided value is a composite bit
mask of enumeration values in the specified enumeration class.
Args:
enumeration (class): One of the mask enumeration classes found in this
file. These include:
* Cryptographic Usage Mask
* Protection Storage Mask
* Storage Status Mask
potential_mask (int): A potential bit mask composed of enumeration
values belonging to the enumeration class.
Returns:
True: if the potential mask is a valid bit mask of the mask enumeration
False: otherwise | Below is the the instruction that describes the task:
### Input:
A utility function that checks if the provided value is a composite bit
mask of enumeration values in the specified enumeration class.
Args:
enumeration (class): One of the mask enumeration classes found in this
file. These include:
* Cryptographic Usage Mask
* Protection Storage Mask
* Storage Status Mask
potential_mask (int): A potential bit mask composed of enumeration
values belonging to the enumeration class.
Returns:
True: if the potential mask is a valid bit mask of the mask enumeration
False: otherwise
### Response:
def is_bit_mask(enumeration, potential_mask):
    """
    Check whether ``potential_mask`` is a composite bit mask made up of
    values from the given mask enumeration class.

    Args:
        enumeration (class): One of the mask enumeration classes defined in
            this file (CryptographicUsageMask, ProtectionStorageMask, or
            StorageStatusMask).
        potential_mask (int): A candidate bit mask composed of enumeration
            values belonging to the enumeration class.

    Returns:
        bool: True if the value is a valid bit mask for the enumeration,
            False otherwise.
    """
    # Only integers can be bit masks.
    if not isinstance(potential_mask, six.integer_types):
        return False

    # Only the known mask enumerations are supported.
    if enumeration not in (CryptographicUsageMask,
                           ProtectionStorageMask,
                           StorageStatusMask):
        return False

    # OR together every enumeration value contained in the candidate; the
    # candidate is valid only if those values reconstruct it exactly.
    covered = 0
    for member in enumeration:
        if (member.value & potential_mask) == member.value:
            covered |= member.value
    return covered == potential_mask
def content_type(self, value=None):
""" Set (replace) and or get "Content-Type" header value
:param value: value to set (if specified)
:return: None if header doesn't exist, otherwise - str
"""
content_type = self.normalize_name('Content-Type')
if value is not None:
self.replace_headers(content_type, value)
if content_type in self.__headers.keys():
return self.__headers[content_type][0] | Set (replace) and or get "Content-Type" header value
:param value: value to set (if specified)
:return: None if header doesn't exist, otherwise - str | Below is the instruction that describes the task:
### Input:
Set (replace) and or get "Content-Type" header value
:param value: value to set (if specified)
:return: None if header doesn't exist, otherwise - str
### Response:
def content_type(self, value=None):
    """ Set (replace) and/or get "Content-Type" header value

    :param value: value to set (if specified)
    :return: None if header doesn't exist, otherwise - str
    """
    content_type = self.normalize_name('Content-Type')
    if value is not None:
        self.replace_headers(content_type, value)
    # Membership test on the dict itself instead of building a keys view.
    if content_type in self.__headers:
        # Headers are stored as lists of values; return the first one.
        return self.__headers[content_type][0]
def get_kernel_spec(self, kernel_name):
"""Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found.
"""
if kernel_name == CURRENT_ENV_KERNEL_NAME:
return self.kernel_spec_class(
resource_dir=ipykernel.kernelspec.RESOURCES,
**ipykernel.kernelspec.get_kernel_dict())
else:
return super(NbvalKernelspecManager, self).get_kernel_spec(kernel_name) | Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found. | Below is the the instruction that describes the task:
### Input:
Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found.
### Response:
def get_kernel_spec(self, kernel_name):
    """Returns a :class:`KernelSpec` instance for the given kernel_name.

    Raises :exc:`NoSuchKernel` if the given kernel name is not found.
    """
    if kernel_name == CURRENT_ENV_KERNEL_NAME:
        # Sentinel kernel name: build a spec for the Python environment we
        # are currently running in, using ipykernel's own resource dir and
        # launch arguments instead of a kernelspec installed on disk.
        return self.kernel_spec_class(
            resource_dir=ipykernel.kernelspec.RESOURCES,
            **ipykernel.kernelspec.get_kernel_dict())
    else:
        # Regular lookup through the parent kernelspec manager.
        return super(NbvalKernelspecManager, self).get_kernel_spec(kernel_name)
def reset_to_flows(self, force=False):
""" Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
super().reset_to_flows(force=force, _meta=self.meta)
return self | Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False | Below is the the instruction that describes the task:
### Input:
Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
### Response:
def reset_to_flows(self, force=False):
    """ Keeps only the absolute values.

    This removes all attributes which can not be aggregated and must be
    recalculated after the aggregation.

    Parameters
    ----------
    force: boolean, optional
        If True, reset to flows although the system can not be
        recalculated. Default: False

    Returns
    -------
    The instance itself, to allow method chaining.
    """
    # Delegate to the parent implementation; pass this system's metadata
    # so the reset is recorded against the right change history.
    super().reset_to_flows(force=force, _meta=self.meta)
    return self
def list(self, request, *args, **kwargs):
"""
To get a list of default price list items, run **GET** against */api/default-price-list-items/*
as authenticated user.
Price lists can be filtered by:
- ?key=<string>
- ?item_type=<string> has to be from list of available item_types
(available options: 'flavor', 'storage', 'license-os', 'license-application', 'network', 'support')
- ?resource_type=<string> resource type, for example: 'OpenStack.Instance, 'Oracle.Database')
"""
return super(DefaultPriceListItemViewSet, self).list(request, *args, **kwargs) | To get a list of default price list items, run **GET** against */api/default-price-list-items/*
as authenticated user.
Price lists can be filtered by:
- ?key=<string>
- ?item_type=<string> has to be from list of available item_types
(available options: 'flavor', 'storage', 'license-os', 'license-application', 'network', 'support')
- ?resource_type=<string> resource type, for example: 'OpenStack.Instance, 'Oracle.Database') | Below is the the instruction that describes the task:
### Input:
To get a list of default price list items, run **GET** against */api/default-price-list-items/*
as authenticated user.
Price lists can be filtered by:
- ?key=<string>
- ?item_type=<string> has to be from list of available item_types
(available options: 'flavor', 'storage', 'license-os', 'license-application', 'network', 'support')
- ?resource_type=<string> resource type, for example: 'OpenStack.Instance, 'Oracle.Database')
### Response:
def list(self, request, *args, **kwargs):
    """
    To get a list of default price list items, run **GET** against */api/default-price-list-items/*
    as authenticated user.
    Price lists can be filtered by:
    - ?key=<string>
    - ?item_type=<string> has to be from list of available item_types
    (available options: 'flavor', 'storage', 'license-os', 'license-application', 'network', 'support')
    - ?resource_type=<string> resource type, for example: 'OpenStack.Instance, 'Oracle.Database')
    """
    # No extra logic: the override exists so the docstring above shows up
    # in the browsable API / generated endpoint documentation.
    return super(DefaultPriceListItemViewSet, self).list(request, *args, **kwargs)
def update_func_body(original, updater=None):
"""Update all function body using the updating function."""
updated = ''
regex = r'([_\w][_\w\d]*)\s*\(.*\)\s*\{'
match = re.search(regex, original)
while match:
name = match.group(1)
logging.debug(_('Found candidate: %s'), name)
start = match.end()
end = start + find_balance_index(original[start:])
body = original[start:end]
if updater:
body = updater(body, name)
updated += original[:start] + '\n' + body + original[end]
original = original[end + 1:]
match = re.search(regex, original)
return updated | Update all function body using the updating function. | Below is the the instruction that describes the task:
### Input:
Update all function body using the updating function.
### Response:
def update_func_body(original, updater=None):
    """Update all function body using the updating function.

    Scans ``original`` for C-style function definitions (``name(...) {``),
    extracts each brace-balanced body via ``find_balance_index`` and, when
    ``updater`` is given, replaces the body with ``updater(body, name)``.

    :param original: source text to scan.
    :param updater: optional callable ``(body, name) -> new_body``.
    :return: the rewritten text.
    """
    updated = ''
    # Identifier followed by an argument list and an opening brace;
    # group 1 captures the function name.
    regex = r'([_\w][_\w\d]*)\s*\(.*\)\s*\{'
    match = re.search(regex, original)
    while match:
        name = match.group(1)
        logging.debug(_('Found candidate: %s'), name)
        # Body spans from just after '{' to its balancing '}' (exclusive).
        start = match.end()
        end = start + find_balance_index(original[start:])
        body = original[start:end]
        if updater:
            body = updater(body, name)
        # NOTE(review): a '\n' is inserted after the '{', only the single
        # character at original[end] is appended after the body, and any
        # text following the last matched function is dropped when the
        # loop exits -- confirm these are intentional.
        updated += original[:start] + '\n' + body + original[end]
        original = original[end + 1:]
        match = re.search(regex, original)
    return updated
def block_range(self, lineno):
"""Get a range from the given line number to where this node ends.
:param lineno: The line number to start the range at.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
starting at the given line number.
:rtype: tuple(int, int)
"""
child = self.body[0]
# py2.5 try: except: finally:
if (
isinstance(child, TryExcept)
and child.fromlineno == self.fromlineno
and child.tolineno >= lineno > self.fromlineno
):
return child.block_range(lineno)
return self._elsed_block_range(lineno, self.finalbody) | Get a range from the given line number to where this node ends.
:param lineno: The line number to start the range at.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
starting at the given line number.
:rtype: tuple(int, int) | Below is the the instruction that describes the task:
### Input:
Get a range from the given line number to where this node ends.
:param lineno: The line number to start the range at.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
starting at the given line number.
:rtype: tuple(int, int)
### Response:
def block_range(self, lineno):
    """Get a range from the given line number to where this node ends.

    :param lineno: The line number to start the range at.
    :type lineno: int
    :returns: The range of line numbers that this node belongs to,
        starting at the given line number.
    :rtype: tuple(int, int)
    """
    child = self.body[0]
    # py2.5 try: except: finally:
    # Old-style syntax nests a TryExcept inside this node; if it starts on
    # the same line and covers `lineno`, delegate so the range reflects
    # the inner try/except block.
    if (
        isinstance(child, TryExcept)
        and child.fromlineno == self.fromlineno
        and child.tolineno >= lineno > self.fromlineno
    ):
        return child.block_range(lineno)
    # Otherwise compute the range relative to the `finally` body.
    return self._elsed_block_range(lineno, self.finalbody)
def evaluate(g: Graph,
schema: Union[str, ShExJ.Schema],
focus: Optional[Union[str, URIRef, IRIREF]],
start: Optional[Union[str, URIRef, IRIREF, START, START_TYPE]]=None,
debug_trace: bool = False) -> Tuple[bool, Optional[str]]:
""" Evaluate focus node `focus` in graph `g` against shape `shape` in ShEx schema `schema`
:param g: Graph containing RDF
:param schema: ShEx Schema -- if str, it will be parsed
:param focus: focus node in g. If not specified, all URI subjects in G will be evaluated.
:param start: Starting shape. If omitted, the Schema start shape is used
:param debug_trace: Turn on debug tracing
:return: None if success or failure reason if failure
"""
if isinstance(schema, str):
schema = SchemaLoader().loads(schema)
if schema is None:
return False, "Error parsing schema"
if not isinstance(focus, URIRef):
focus = URIRef(str(focus))
if start is None:
start = str(schema.start) if schema.start else None
if start is None:
return False, "No starting shape"
if not isinstance(start, IRIREF) and start is not START and start is not START_TYPE:
start = IRIREF(str(start))
cntxt = Context(g, schema)
cntxt.debug_context.debug = debug_trace
map_ = FixedShapeMap()
map_.add(ShapeAssociation(focus, start))
test_result, reasons = isValid(cntxt, map_)
return test_result, '\n'.join(reasons) | Evaluate focus node `focus` in graph `g` against shape `shape` in ShEx schema `schema`
:param g: Graph containing RDF
:param schema: ShEx Schema -- if str, it will be parsed
:param focus: focus node in g. If not specified, all URI subjects in G will be evaluated.
:param start: Starting shape. If omitted, the Schema start shape is used
:param debug_trace: Turn on debug tracing
:return: None if success or failure reason if failure | Below is the the instruction that describes the task:
### Input:
Evaluate focus node `focus` in graph `g` against shape `shape` in ShEx schema `schema`
:param g: Graph containing RDF
:param schema: ShEx Schema -- if str, it will be parsed
:param focus: focus node in g. If not specified, all URI subjects in G will be evaluated.
:param start: Starting shape. If omitted, the Schema start shape is used
:param debug_trace: Turn on debug tracing
:return: None if success or failure reason if failure
### Response:
def evaluate(g: Graph,
             schema: Union[str, ShExJ.Schema],
             focus: Optional[Union[str, URIRef, IRIREF]],
             start: Optional[Union[str, URIRef, IRIREF, START, START_TYPE]]=None,
             debug_trace: bool = False) -> Tuple[bool, Optional[str]]:
    """ Evaluate focus node `focus` in graph `g` against shape `shape` in ShEx schema `schema`

    :param g: Graph containing RDF
    :param schema: ShEx Schema -- if str, it will be parsed
    :param focus: focus node in g. If not specified, all URI subjects in G will be evaluated.
    :param start: Starting shape. If omitted, the Schema start shape is used
    :param debug_trace: Turn on debug tracing
    :return: None if success or failure reason if failure
    """
    if isinstance(schema, str):
        schema = SchemaLoader().loads(schema)
    if schema is None:
        return False, "Error parsing schema"
    # NOTE(review): when focus is None this coerces it to URIRef("None")
    # rather than evaluating all subjects as the docstring suggests --
    # confirm callers always pass an explicit focus.
    if not isinstance(focus, URIRef):
        focus = URIRef(str(focus))
    if start is None:
        # Fall back to the schema's declared start shape, if any.
        start = str(schema.start) if schema.start else None
    if start is None:
        return False, "No starting shape"
    if not isinstance(start, IRIREF) and start is not START and start is not START_TYPE:
        start = IRIREF(str(start))
    cntxt = Context(g, schema)
    cntxt.debug_context.debug = debug_trace
    # Validate the single (focus, start) association.
    map_ = FixedShapeMap()
    map_.add(ShapeAssociation(focus, start))
    test_result, reasons = isValid(cntxt, map_)
    return test_result, '\n'.join(reasons)
def _merge_outfile_fname(out_file, bam_files, work_dir, batch):
"""Derive correct name of BAM file based on batching.
"""
if out_file is None:
out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
if batch is not None:
base, ext = os.path.splitext(out_file)
out_file = "%s-b%s%s" % (base, batch, ext)
return out_file | Derive correct name of BAM file based on batching. | Below is the the instruction that describes the task:
### Input:
Derive correct name of BAM file based on batching.
### Response:
def _merge_outfile_fname(out_file, bam_files, work_dir, batch):
"""Derive correct name of BAM file based on batching.
"""
if out_file is None:
out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
if batch is not None:
base, ext = os.path.splitext(out_file)
out_file = "%s-b%s%s" % (base, batch, ext)
return out_file |
def get_request_body_chunk(self, content: bytes, closed: bool,
more_content: bool) -> Dict[str, Any]:
'''
http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk
'''
return {
'content': content,
'closed': closed,
'more_content': more_content
} | http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk | Below is the the instruction that describes the task:
### Input:
http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk
### Response:
def get_request_body_chunk(self, content: bytes, closed: bool,
                           more_content: bool) -> Dict[str, Any]:
    '''
    Build a Request Body Chunk message, as described at
    http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk
    '''
    return dict(content=content, closed=closed, more_content=more_content)
def computePi(self,method='power'):
"""
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
"""
methodSet = ['power','eigen','linear','krylov']
assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
method = method + 'Method'
return getattr(self,method)() | Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` . | Below is the the instruction that describes the task:
### Input:
Calculate the steady state distribution using your preferred method and store it in the attribute `pi`.
By default uses the most robust method, 'power'. Other methods are 'eigen','linear', and 'krylov'
Parameters
----------
method : string, optional(default='power')
The method for obtaining ``pi``. The possible options are 'power','eigen','linear','krylov'.
Example
-------
>>> P = np.array([[0.5,0.5],[0.6,0.4]])
>>> mc = markovChain(P)
>>> mc.computePi('power')
>>> print(mc.pi)
[ 0.54545455 0.45454545]
See Also
--------
For details about the specific methods see
:func:`powerMethod`,
:func:`eigenMethod`,
:func:`linearMethod`, and
:func:`krylovMethod` .
### Response:
def computePi(self, method='power'):
    """
    Compute the steady-state distribution and store it in the attribute ``pi``.

    Parameters
    ----------
    method : string, optional(default='power')
        One of 'power' (default, most robust), 'eigen', 'linear' or
        'krylov'; selects which solver is dispatched to.

    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.computePi('power')
    >>> print(mc.pi)
    [ 0.54545455  0.45454545]

    See Also
    --------
    :func:`powerMethod`, :func:`eigenMethod`, :func:`linearMethod`,
    :func:`krylovMethod`
    """
    methodSet = ['power','eigen','linear','krylov']
    assert method in methodSet, "Incorrect method specified. Choose from %r" % methodSet
    # Dispatch by name: e.g. 'power' -> self.powerMethod().
    return getattr(self, method + 'Method')()
def get_subadres_by_id(self, id):
'''
Retrieve a `Subadres` by the Id.
:param integer id: the Id of the `Subadres`
:rtype: :class:`Subadres`
'''
def creator():
res = crab_gateway_request(
self.client, 'GetSubadresWithStatusBySubadresId', id
)
if res == None:
raise GatewayResourceNotFoundException()
return Subadres(
res.SubadresId,
res.Subadres,
res.StatusSubadres,
res.HuisnummerId,
res.AardSubadres,
Metadata(
res.BeginDatum,
res.BeginTijd,
self.get_bewerking(res.BeginBewerking),
self.get_organisatie(res.BeginOrganisatie)
)
)
if self.caches['short'].is_configured:
key = 'GetSubadresWithStatusBySubadresId#%s' % (id)
subadres = self.caches['short'].get_or_create(key, creator)
else:
subadres = creator()
subadres.set_gateway(self)
return subadres | Retrieve a `Subadres` by the Id.
:param integer id: the Id of the `Subadres`
:rtype: :class:`Subadres` | Below is the the instruction that describes the task:
### Input:
Retrieve a `Subadres` by the Id.
:param integer id: the Id of the `Subadres`
:rtype: :class:`Subadres`
### Response:
def get_subadres_by_id(self, id):
    '''
    Retrieve a `Subadres` by the Id.

    :param integer id: the Id of the `Subadres`
    :rtype: :class:`Subadres`
    :raises GatewayResourceNotFoundException: if no subadres has this id.
    '''
    def creator():
        # Query the CRAB service; the service yields None for unknown ids.
        res = crab_gateway_request(
            self.client, 'GetSubadresWithStatusBySubadresId', id
        )
        # Identity check instead of `== None`: equality can misbehave on
        # SOAP proxy objects that override __eq__.
        if res is None:
            raise GatewayResourceNotFoundException()
        return Subadres(
            res.SubadresId,
            res.Subadres,
            res.StatusSubadres,
            res.HuisnummerId,
            res.AardSubadres,
            Metadata(
                res.BeginDatum,
                res.BeginTijd,
                self.get_bewerking(res.BeginBewerking),
                self.get_organisatie(res.BeginOrganisatie)
            )
        )
    if self.caches['short'].is_configured:
        # Cache per id so repeated lookups skip the remote call.
        key = 'GetSubadresWithStatusBySubadresId#%s' % (id)
        subadres = self.caches['short'].get_or_create(key, creator)
    else:
        subadres = creator()
    subadres.set_gateway(self)
    return subadres
def delete_by_id(self, webhook, params={}, **options):
"""This method permanently removes a webhook. Note that it may be possible
to receive a request that was already in flight after deleting the
webhook, but no further requests will be issued.
Parameters
----------
webhook : {Id} The webhook to delete.
"""
path = "/webhooks/%s" % (webhook)
return self.client.delete(path, params, **options) | This method permanently removes a webhook. Note that it may be possible
to receive a request that was already in flight after deleting the
webhook, but no further requests will be issued.
Parameters
----------
webhook : {Id} The webhook to delete. | Below is the the instruction that describes the task:
### Input:
This method permanently removes a webhook. Note that it may be possible
to receive a request that was already in flight after deleting the
webhook, but no further requests will be issued.
Parameters
----------
webhook : {Id} The webhook to delete.
### Response:
def delete_by_id(self, webhook, params=None, **options):
    """This method permanently removes a webhook. Note that it may be possible
    to receive a request that was already in flight after deleting the
    webhook, but no further requests will be issued.

    Parameters
    ----------
    webhook : {Id} The webhook to delete.
    params : {dict, optional} Extra query parameters for the request.
    """
    # Avoid the mutable-default-argument pitfall: build a fresh dict per
    # call instead of sharing one `{}` across all invocations.
    if params is None:
        params = {}
    path = "/webhooks/%s" % (webhook)
    return self.client.delete(path, params, **options)
def upload_html(destination, html, name=None):
"""
Uploads the HTML to a file on the server
"""
[project, path, n] = parse_destination(destination)
try:
dxfile = dxpy.upload_string(html, media_type="text/html", project=project, folder=path, hidden=True, name=name or None)
return dxfile.get_id()
except dxpy.DXAPIError as ex:
parser.error("Could not upload HTML report to DNAnexus server! ({ex})".format(ex=ex)) | Uploads the HTML to a file on the server | Below is the the instruction that describes the task:
### Input:
Uploads the HTML to a file on the server
### Response:
def upload_html(destination, html, name=None):
    """
    Uploads the HTML to a file on the server

    :param destination: DNAnexus destination string, split by
        parse_destination into (project, folder path, name).
    :param html: HTML content, uploaded as a hidden text/html file.
    :param name: optional file name; server default when falsy.
    :return: the id of the uploaded file object.
    """
    [project, path, n] = parse_destination(destination)
    try:
        dxfile = dxpy.upload_string(html, media_type="text/html", project=project, folder=path, hidden=True, name=name or None)
        return dxfile.get_id()
    except dxpy.DXAPIError as ex:
        # parser.error() reports the message and exits the process, so no
        # value is returned on the failure path.
        parser.error("Could not upload HTML report to DNAnexus server! ({ex})".format(ex=ex))
def create_logger(app: 'Quart') -> Logger:
"""Create a logger for the app based on the app settings.
This creates a logger named quart.app that has a log level based
on the app configuration.
"""
logger = getLogger('quart.app')
if app.debug and logger.level == NOTSET:
logger.setLevel(DEBUG)
logger.addHandler(default_handler)
return logger | Create a logger for the app based on the app settings.
This creates a logger named quart.app that has a log level based
on the app configuration. | Below is the the instruction that describes the task:
### Input:
Create a logger for the app based on the app settings.
This creates a logger named quart.app that has a log level based
on the app configuration.
### Response:
def create_logger(app: 'Quart') -> Logger:
    """Create a logger for the app based on the app settings.

    This creates a logger named quart.app that has a log level based
    on the app configuration.

    :param app: the application whose ``debug`` flag controls the level.
    :return: the configured ``quart.app`` logger.
    """
    logger = getLogger('quart.app')
    # Only raise to DEBUG when the app is in debug mode and no explicit
    # level has been configured elsewhere (level still NOTSET).
    if app.debug and logger.level == NOTSET:
        logger.setLevel(DEBUG)
    # NOTE(review): addHandler runs on every call, so calling this more
    # than once attaches duplicate handlers -- confirm single-call usage.
    logger.addHandler(default_handler)
    return logger
def _change_generic(env, model_name, record_ids, target_record_id,
exclude_columns, method='orm'):
""" Update known generic style res_id/res_model references """
for model_to_replace, res_id_column, model_column in [
('calendar.event', 'res_id', 'res_model'),
('ir.attachment', 'res_id', 'res_model'),
('mail.activity', 'res_id', 'res_model'),
('mail.followers', 'res_id', 'res_model'),
('mail.message', 'res_id', 'model'),
('rating.rating', 'res_id', 'res_model'),
]:
try:
model = env[model_to_replace].with_context(active_test=False)
except KeyError:
continue
if (model._table, res_id_column) in exclude_columns:
continue
if method == 'orm':
records = model.search([
(model_column, '=', model_name),
(res_id_column, 'in', record_ids)])
if records:
records.write({res_id_column: target_record_id})
logger.debug(
"Changed %s record(s) of model '%s'",
len(records), model_to_replace)
else:
logged_query(
env.cr,
""" UPDATE %(table)s
SET %(res_id_column)s = %(target_record_id)s
WHERE %(model_column)s = %(model_name)s
AND %(res_id_column)s in %(record_ids)s
""",
{
'table': AsIs(model._table),
'res_id_column': AsIs(res_id_column),
'model_column': AsIs(model_column),
'model_name': model_name,
'target_record_id': target_record_id,
'record_ids': record_ids,
}, skip_no_result=True) | Update known generic style res_id/res_model references | Below is the the instruction that describes the task:
### Input:
Update known generic style res_id/res_model references
### Response:
def _change_generic(env, model_name, record_ids, target_record_id,
                    exclude_columns, method='orm'):
    """ Update known generic style res_id/res_model references

    Repoints records of helper models (calendar events, attachments,
    activities, followers, messages, ratings) that reference
    ``model_name`` with one of ``record_ids`` so that they reference
    ``target_record_id`` instead.

    :param env: Odoo environment.
    :param model_name: technical name of the referenced model.
    :param record_ids: ids whose references must be repointed.
    :param target_record_id: id the references are repointed to.
    :param exclude_columns: iterable of (table, column) pairs to skip.
    :param method: 'orm' to write through the ORM, any other value to
        update with direct SQL via logged_query.
    """
    for model_to_replace, res_id_column, model_column in [
        ('calendar.event', 'res_id', 'res_model'),
        ('ir.attachment', 'res_id', 'res_model'),
        ('mail.activity', 'res_id', 'res_model'),
        ('mail.followers', 'res_id', 'res_model'),
        ('mail.message', 'res_id', 'model'),
        ('rating.rating', 'res_id', 'res_model'),
    ]:
        try:
            # active_test=False so archived records are updated as well.
            model = env[model_to_replace].with_context(active_test=False)
        except KeyError:
            # Model not installed in this database; skip it.
            continue
        if (model._table, res_id_column) in exclude_columns:
            continue
        if method == 'orm':
            records = model.search([
                (model_column, '=', model_name),
                (res_id_column, 'in', record_ids)])
            if records:
                records.write({res_id_column: target_record_id})
                logger.debug(
                    "Changed %s record(s) of model '%s'",
                    len(records), model_to_replace)
        else:
            # Direct SQL path; AsIs injects identifiers (table/column
            # names) unquoted while values stay parameterized.
            logged_query(
                env.cr,
                """ UPDATE %(table)s
                SET %(res_id_column)s = %(target_record_id)s
                WHERE %(model_column)s = %(model_name)s
                AND %(res_id_column)s in %(record_ids)s
                """,
                {
                    'table': AsIs(model._table),
                    'res_id_column': AsIs(res_id_column),
                    'model_column': AsIs(model_column),
                    'model_name': model_name,
                    'target_record_id': target_record_id,
                    'record_ids': record_ids,
                }, skip_no_result=True)
def subscribe(self):
"""
Subscribe to contact and conversation events. These are accessible through :meth:`getEvents`.
"""
self.conn("POST", "{0}/users/ME/endpoints/{1}/subscriptions".format(self.conn.msgsHost, self.id),
auth=SkypeConnection.Auth.RegToken,
json={"interestedResources": ["/v1/threads/ALL",
"/v1/users/ME/contacts/ALL",
"/v1/users/ME/conversations/ALL/messages",
"/v1/users/ME/conversations/ALL/properties"],
"template": "raw",
"channelType": "httpLongPoll"})
self.subscribed = True | Subscribe to contact and conversation events. These are accessible through :meth:`getEvents`. | Below is the the instruction that describes the task:
### Input:
Subscribe to contact and conversation events. These are accessible through :meth:`getEvents`.
### Response:
def subscribe(self):
    """
    Subscribe to contact and conversation events. These are accessible through :meth:`getEvents`.
    """
    # Register this endpoint for long-poll notifications covering threads,
    # contacts, conversation messages and conversation property changes.
    self.conn("POST", "{0}/users/ME/endpoints/{1}/subscriptions".format(self.conn.msgsHost, self.id),
              auth=SkypeConnection.Auth.RegToken,
              json={"interestedResources": ["/v1/threads/ALL",
                                            "/v1/users/ME/contacts/ALL",
                                            "/v1/users/ME/conversations/ALL/messages",
                                            "/v1/users/ME/conversations/ALL/properties"],
                    "template": "raw",
                    "channelType": "httpLongPoll"})
    # Record that the subscription exists so event polling can proceed.
    self.subscribed = True
def close_client_stream(client_stream, unix_path):
""" Closes provided client stream """
try:
client_stream.shutdown(socket.SHUT_RDWR)
if unix_path:
logger.debug('%s: Connection closed', unix_path)
else:
peer = client_stream.getpeername()
logger.debug('%s:%s: Connection closed', peer[0], peer[1])
except (socket.error, OSError) as exception:
logger.debug('Connection closing error: %s', exception)
client_stream.close() | Closes provided client stream | Below is the the instruction that describes the task:
### Input:
Closes provided client stream
### Response:
def close_client_stream(client_stream, unix_path):
    """ Closes provided client stream """
    try:
        client_stream.shutdown(socket.SHUT_RDWR)
        if not unix_path:
            # TCP socket: log the remote peer's host and port.
            remote = client_stream.getpeername()
            logger.debug('%s:%s: Connection closed', remote[0], remote[1])
        else:
            # Unix-domain socket: log the socket path instead.
            logger.debug('%s: Connection closed', unix_path)
    except (socket.error, OSError) as error:
        logger.debug('Connection closing error: %s', error)
    client_stream.close()
def conformPadding(cls, chars):
"""
Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters
"""
pad = chars
if pad and pad[0] not in PAD_MAP:
pad = cls.getPaddingChars(cls.getPaddingNum(pad))
return pad | Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters | Below is the the instruction that describes the task:
### Input:
Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters
### Response:
def conformPadding(cls, chars):
    """
    Ensure alternate input padding formats are conformed
    to formats defined in PAD_MAP

    If chars is already a format defined in PAD_MAP, then
    it is returned unmodified.

    Example::
        '#' -> '#'
        '@@@@' -> '@@@@'
        '%04d' -> '#'

    Args:
        chars (str): input padding chars

    Returns:
        str: conformed padding chars

    Raises:
        ValueError: If chars contains invalid padding characters
    """
    # Empty input or a native padding style: nothing to translate.
    if not chars or chars[0] in PAD_MAP:
        return chars
    # Foreign style (e.g. printf '%04d'): convert via its numeric width.
    return cls.getPaddingChars(cls.getPaddingNum(chars))
def prefix(self, prefix, lowercase=True):
''' Returns a dictionary of keys with the same prefix.
Compat with kr/env, lowercased.
> xdg = env.prefix('XDG_')
> for key, value in xdg.items():
print('%-20s' % key, value[:6], '…')
config_dirs /etc/x…
current_desktop MATE
data_dirs /usr/s…
…
'''
env_subset = {}
for key in self._envars.keys():
if key.startswith(prefix):
new_key = key[len(prefix):] # cut front
new_key = new_key.lower() if lowercase else new_key
env_subset[new_key] = str(self._envars[key]) # str strips Entry
return Environment(environ=env_subset,
sensitive=self._sensitive,
blankify=self._blankify,
noneify=self._noneify,
writable=self._writable,
) | Returns a dictionary of keys with the same prefix.
Compat with kr/env, lowercased.
> xdg = env.prefix('XDG_')
> for key, value in xdg.items():
print('%-20s' % key, value[:6], '…')
config_dirs /etc/x…
current_desktop MATE
data_dirs /usr/s…
… | Below is the the instruction that describes the task:
### Input:
Returns a dictionary of keys with the same prefix.
Compat with kr/env, lowercased.
> xdg = env.prefix('XDG_')
> for key, value in xdg.items():
print('%-20s' % key, value[:6], '…')
config_dirs /etc/x…
current_desktop MATE
data_dirs /usr/s…
…
### Response:
def prefix(self, prefix, lowercase=True):
    ''' Returns a dictionary of keys with the same prefix.
        Compat with kr/env, lowercased.

        > xdg = env.prefix('XDG_')
        > for key, value in xdg.items():
             print('%-20s' % key, value[:6], '…')

        config_dirs          /etc/x…
        current_desktop      MATE
        data_dirs            /usr/s…
        …

        :param prefix: only keys starting with this string are kept.
        :param lowercase: lowercase the resulting keys when True.
        :return: a new Environment built from the matching subset,
            inheriting this instance's configuration flags.
    '''
    env_subset = {}
    for key in self._envars.keys():
        if key.startswith(prefix):
            new_key = key[len(prefix):]  # cut front
            new_key = new_key.lower() if lowercase else new_key
            env_subset[new_key] = str(self._envars[key])  # str strips Entry
    # Wrap the subset in a fresh Environment carrying the same flags.
    return Environment(environ=env_subset,
                       sensitive=self._sensitive,
                       blankify=self._blankify,
                       noneify=self._noneify,
                       writable=self._writable,
                       )
def run_notebook(self, skip_exceptions=False, progress_callback=None):
"""
Run all the notebook cells in order and update the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops).
"""
for i, cell in enumerate(self.iter_code_cells()):
try:
self.run_cell(cell)
except NotebookError:
if not skip_exceptions:
raise
if progress_callback:
progress_callback(i) | Run all the notebook cells in order and update the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops). | Below is the the instruction that describes the task:
### Input:
Run all the notebook cells in order and update the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops).
### Response:
def run_notebook(self, skip_exceptions=False, progress_callback=None):
"""
Run all the notebook cells in order and update the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops).
"""
for i, cell in enumerate(self.iter_code_cells()):
try:
self.run_cell(cell)
except NotebookError:
if not skip_exceptions:
raise
if progress_callback:
progress_callback(i) |
def _convert_option(self):
'''
Determines how to convert CDF byte ordering to the system
byte ordering.
'''
if sys.byteorder == 'little' and self._endian() == 'big-endian':
# big->little
order = '>'
elif sys.byteorder == 'big' and self._endian() == 'little-endian':
# little->big
order = '<'
else:
# no conversion
order = '='
return order | Determines how to convert CDF byte ordering to the system
byte ordering. | Below is the the instruction that describes the task:
### Input:
Determines how to convert CDF byte ordering to the system
byte ordering.
### Response:
def _convert_option(self):
'''
Determines how to convert CDF byte ordering to the system
byte ordering.
'''
if sys.byteorder == 'little' and self._endian() == 'big-endian':
# big->little
order = '>'
elif sys.byteorder == 'big' and self._endian() == 'little-endian':
# little->big
order = '<'
else:
# no conversion
order = '='
return order |
def backup(outputdir):
"""
Backup an anchore installation to a tarfile.
"""
ecode = 0
try:
anchore_print('Backing up anchore system to directory '+str(outputdir)+' ...')
backupfile = config.backup(outputdir)
anchore_print({"anchore_backup_tarball":str(backupfile)}, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | Backup an anchore installation to a tarfile. | Below is the the instruction that describes the task:
### Input:
Backup an anchore installation to a tarfile.
### Response:
def backup(outputdir):
"""
Backup an anchore installation to a tarfile.
"""
ecode = 0
try:
anchore_print('Backing up anchore system to directory '+str(outputdir)+' ...')
backupfile = config.backup(outputdir)
anchore_print({"anchore_backup_tarball":str(backupfile)}, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) |
def _condition_entries(self):
"""Extracts any ARNs, Account Numbers, UserIDs, Usernames, CIDRs, VPCs, and VPC Endpoints from a condition block.
Ignores any negated condition operators like StringNotLike.
Ignores weak condition keys like referer, date, etc.
Reason: A condition is meant to limit the principal in a statement. Often, resource policies use a wildcard principal
and rely exclusively on the Condition block to limit access.
We would want to alert if the Condition had no limitations (like a non-existent Condition block), or very weak limitations. Any negation
would be weak, and largely equivelant to having no condition block whatsoever.
The alerting code that relies on this data must ensure the condition has at least one of the following:
- A limiting ARN
- Account Identifier
- AWS Organization Principal Org ID
- User ID
- Source IP / CIDR
- VPC
- VPC Endpoint
https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html
"""
conditions = list()
condition = self.statement.get('Condition')
if not condition:
return conditions
key_mapping = {
'aws:sourcearn': 'arn',
'aws:sourceowner': 'account',
'aws:sourceaccount': 'account',
'aws:principalorgid': 'org-id',
'kms:calleraccount': 'account',
'aws:userid': 'userid',
'aws:sourceip': 'cidr',
'aws:sourcevpc': 'vpc',
'aws:sourcevpce': 'vpce'
}
relevant_condition_operators = [
re.compile('((ForAllValues|ForAnyValue):)?ARN(Equals|Like)(IfExists)?', re.IGNORECASE),
re.compile('((ForAllValues|ForAnyValue):)?String(Equals|Like)(IgnoreCase)?(IfExists)?', re.IGNORECASE),
re.compile('((ForAllValues|ForAnyValue):)?IpAddress(IfExists)?', re.IGNORECASE)]
for condition_operator in condition.keys():
if any(regex.match(condition_operator) for regex in relevant_condition_operators):
for key, value in condition[condition_operator].items():
# ForAllValues and ForAnyValue must be paired with a list.
# Otherwise, skip over entries.
if not isinstance(value, list) and condition_operator.lower().startswith('for'):
continue
if key.lower() in key_mapping:
if isinstance(value, list):
for v in value:
conditions.append(
ConditionTuple(value=v, category=key_mapping[key.lower()]))
else:
conditions.append(
ConditionTuple(value=value, category=key_mapping[key.lower()]))
return conditions | Extracts any ARNs, Account Numbers, UserIDs, Usernames, CIDRs, VPCs, and VPC Endpoints from a condition block.
Ignores any negated condition operators like StringNotLike.
Ignores weak condition keys like referer, date, etc.
Reason: A condition is meant to limit the principal in a statement. Often, resource policies use a wildcard principal
and rely exclusively on the Condition block to limit access.
We would want to alert if the Condition had no limitations (like a non-existent Condition block), or very weak limitations. Any negation
would be weak, and largely equivelant to having no condition block whatsoever.
The alerting code that relies on this data must ensure the condition has at least one of the following:
- A limiting ARN
- Account Identifier
- AWS Organization Principal Org ID
- User ID
- Source IP / CIDR
- VPC
- VPC Endpoint
https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html | Below is the the instruction that describes the task:
### Input:
Extracts any ARNs, Account Numbers, UserIDs, Usernames, CIDRs, VPCs, and VPC Endpoints from a condition block.
Ignores any negated condition operators like StringNotLike.
Ignores weak condition keys like referer, date, etc.
Reason: A condition is meant to limit the principal in a statement. Often, resource policies use a wildcard principal
and rely exclusively on the Condition block to limit access.
We would want to alert if the Condition had no limitations (like a non-existent Condition block), or very weak limitations. Any negation
would be weak, and largely equivelant to having no condition block whatsoever.
The alerting code that relies on this data must ensure the condition has at least one of the following:
- A limiting ARN
- Account Identifier
- AWS Organization Principal Org ID
- User ID
- Source IP / CIDR
- VPC
- VPC Endpoint
https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html
### Response:
def _condition_entries(self):
"""Extracts any ARNs, Account Numbers, UserIDs, Usernames, CIDRs, VPCs, and VPC Endpoints from a condition block.
Ignores any negated condition operators like StringNotLike.
Ignores weak condition keys like referer, date, etc.
Reason: A condition is meant to limit the principal in a statement. Often, resource policies use a wildcard principal
and rely exclusively on the Condition block to limit access.
We would want to alert if the Condition had no limitations (like a non-existent Condition block), or very weak limitations. Any negation
would be weak, and largely equivelant to having no condition block whatsoever.
The alerting code that relies on this data must ensure the condition has at least one of the following:
- A limiting ARN
- Account Identifier
- AWS Organization Principal Org ID
- User ID
- Source IP / CIDR
- VPC
- VPC Endpoint
https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html
"""
conditions = list()
condition = self.statement.get('Condition')
if not condition:
return conditions
key_mapping = {
'aws:sourcearn': 'arn',
'aws:sourceowner': 'account',
'aws:sourceaccount': 'account',
'aws:principalorgid': 'org-id',
'kms:calleraccount': 'account',
'aws:userid': 'userid',
'aws:sourceip': 'cidr',
'aws:sourcevpc': 'vpc',
'aws:sourcevpce': 'vpce'
}
relevant_condition_operators = [
re.compile('((ForAllValues|ForAnyValue):)?ARN(Equals|Like)(IfExists)?', re.IGNORECASE),
re.compile('((ForAllValues|ForAnyValue):)?String(Equals|Like)(IgnoreCase)?(IfExists)?', re.IGNORECASE),
re.compile('((ForAllValues|ForAnyValue):)?IpAddress(IfExists)?', re.IGNORECASE)]
for condition_operator in condition.keys():
if any(regex.match(condition_operator) for regex in relevant_condition_operators):
for key, value in condition[condition_operator].items():
# ForAllValues and ForAnyValue must be paired with a list.
# Otherwise, skip over entries.
if not isinstance(value, list) and condition_operator.lower().startswith('for'):
continue
if key.lower() in key_mapping:
if isinstance(value, list):
for v in value:
conditions.append(
ConditionTuple(value=v, category=key_mapping[key.lower()]))
else:
conditions.append(
ConditionTuple(value=value, category=key_mapping[key.lower()]))
return conditions |
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs) | deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions. | Below is the the instruction that describes the task:
### Input:
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
### Response:
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs) |
def create_gemini_db_orig(gemini_vcf, data, gemini_db=None, ped_file=None):
"""Original GEMINI specific data loader, only works with hg19/GRCh37.
"""
if not gemini_db:
gemini_db = "%s.db" % utils.splitext_plus(gemini_vcf)[0]
if not utils.file_exists(gemini_db):
if not vcfutils.vcf_has_variants(gemini_vcf):
return None
with file_transaction(data, gemini_db) as tx_gemini_db:
gemini = config_utils.get_program("gemini", data["config"])
load_opts = ""
if "gemini_allvariants" not in dd.get_tools_on(data):
load_opts += " --passonly"
# For small test files, skip gene table loading which takes a long time
if _is_small_vcf(gemini_vcf):
load_opts += " --skip-gene-tables"
if "/test_automated_output/" in gemini_vcf:
load_opts += " --test-mode"
# Skip CADD or gerp-bp if neither are loaded
gemini_dir = install.get_gemini_dir(data)
for skip_cmd, check_file in [("--skip-cadd", "whole_genome_SNVs.tsv.compressed.gz")]:
if not os.path.exists(os.path.join(gemini_dir, check_file)):
load_opts += " %s" % skip_cmd
# skip gerp-bp which slows down loading
load_opts += " --skip-gerp-bp "
num_cores = data["config"]["algorithm"].get("num_cores", 1)
tmpdir = os.path.dirname(tx_gemini_db)
eanns = _get_effects_flag(data)
# Apply custom resource specifications, allowing use of alternative annotation_dir
resources = config_utils.get_resources("gemini", data["config"])
gemini_opts = " ".join([str(x) for x in resources["options"]]) if resources.get("options") else ""
exports = utils.local_path_export()
cmd = ("{exports} {gemini} {gemini_opts} load {load_opts} "
"-v {gemini_vcf} {eanns} --cores {num_cores} "
"--tempdir {tmpdir} {tx_gemini_db}")
cmd = cmd.format(**locals())
do.run(cmd, "Create gemini database for %s" % gemini_vcf, data)
if ped_file:
cmd = [gemini, "amend", "--sample", ped_file, tx_gemini_db]
do.run(cmd, "Add PED file to gemini database", data)
return gemini_db | Original GEMINI specific data loader, only works with hg19/GRCh37. | Below is the the instruction that describes the task:
### Input:
Original GEMINI specific data loader, only works with hg19/GRCh37.
### Response:
def create_gemini_db_orig(gemini_vcf, data, gemini_db=None, ped_file=None):
"""Original GEMINI specific data loader, only works with hg19/GRCh37.
"""
if not gemini_db:
gemini_db = "%s.db" % utils.splitext_plus(gemini_vcf)[0]
if not utils.file_exists(gemini_db):
if not vcfutils.vcf_has_variants(gemini_vcf):
return None
with file_transaction(data, gemini_db) as tx_gemini_db:
gemini = config_utils.get_program("gemini", data["config"])
load_opts = ""
if "gemini_allvariants" not in dd.get_tools_on(data):
load_opts += " --passonly"
# For small test files, skip gene table loading which takes a long time
if _is_small_vcf(gemini_vcf):
load_opts += " --skip-gene-tables"
if "/test_automated_output/" in gemini_vcf:
load_opts += " --test-mode"
# Skip CADD or gerp-bp if neither are loaded
gemini_dir = install.get_gemini_dir(data)
for skip_cmd, check_file in [("--skip-cadd", "whole_genome_SNVs.tsv.compressed.gz")]:
if not os.path.exists(os.path.join(gemini_dir, check_file)):
load_opts += " %s" % skip_cmd
# skip gerp-bp which slows down loading
load_opts += " --skip-gerp-bp "
num_cores = data["config"]["algorithm"].get("num_cores", 1)
tmpdir = os.path.dirname(tx_gemini_db)
eanns = _get_effects_flag(data)
# Apply custom resource specifications, allowing use of alternative annotation_dir
resources = config_utils.get_resources("gemini", data["config"])
gemini_opts = " ".join([str(x) for x in resources["options"]]) if resources.get("options") else ""
exports = utils.local_path_export()
cmd = ("{exports} {gemini} {gemini_opts} load {load_opts} "
"-v {gemini_vcf} {eanns} --cores {num_cores} "
"--tempdir {tmpdir} {tx_gemini_db}")
cmd = cmd.format(**locals())
do.run(cmd, "Create gemini database for %s" % gemini_vcf, data)
if ped_file:
cmd = [gemini, "amend", "--sample", ped_file, tx_gemini_db]
do.run(cmd, "Add PED file to gemini database", data)
return gemini_db |
def _align(self, axis):
"""
Align spark bolt array so that axes for iteration are in the keys.
This operation is applied before most functional operators.
It ensures that the specified axes are valid, and swaps
key/value axes so that functional operators can be applied
over the correct records.
Parameters
----------
axis: tuple[int]
One or more axes that wil be iterated over by a functional operator
Returns
-------
BoltArraySpark
"""
# ensure that the specified axes are valid
inshape(self.shape, axis)
# find the value axes that should be moved into the keys (axis >= split)
tokeys = [(a - self.split) for a in axis if a >= self.split]
# find the key axes that should be moved into the values (axis < split)
tovalues = [a for a in range(self.split) if a not in axis]
if tokeys or tovalues:
return self.swap(tovalues, tokeys)
else:
return self | Align spark bolt array so that axes for iteration are in the keys.
This operation is applied before most functional operators.
It ensures that the specified axes are valid, and swaps
key/value axes so that functional operators can be applied
over the correct records.
Parameters
----------
axis: tuple[int]
One or more axes that wil be iterated over by a functional operator
Returns
-------
BoltArraySpark | Below is the the instruction that describes the task:
### Input:
Align spark bolt array so that axes for iteration are in the keys.
This operation is applied before most functional operators.
It ensures that the specified axes are valid, and swaps
key/value axes so that functional operators can be applied
over the correct records.
Parameters
----------
axis: tuple[int]
One or more axes that wil be iterated over by a functional operator
Returns
-------
BoltArraySpark
### Response:
def _align(self, axis):
"""
Align spark bolt array so that axes for iteration are in the keys.
This operation is applied before most functional operators.
It ensures that the specified axes are valid, and swaps
key/value axes so that functional operators can be applied
over the correct records.
Parameters
----------
axis: tuple[int]
One or more axes that wil be iterated over by a functional operator
Returns
-------
BoltArraySpark
"""
# ensure that the specified axes are valid
inshape(self.shape, axis)
# find the value axes that should be moved into the keys (axis >= split)
tokeys = [(a - self.split) for a in axis if a >= self.split]
# find the key axes that should be moved into the values (axis < split)
tovalues = [a for a in range(self.split) if a not in axis]
if tokeys or tovalues:
return self.swap(tovalues, tokeys)
else:
return self |
def file_is_present(self, file_path):
"""
check if file 'file_path' is present, raises IOError if file_path
is not a file
:param file_path: str, path to the file
:return: True if file exists, False if file does not exist
"""
real_path = self.cont_path(file_path)
if not os.path.exists(real_path):
return False
if not os.path.isfile(real_path):
raise IOError("%s is not a file" % file_path)
return True | check if file 'file_path' is present, raises IOError if file_path
is not a file
:param file_path: str, path to the file
:return: True if file exists, False if file does not exist | Below is the the instruction that describes the task:
### Input:
check if file 'file_path' is present, raises IOError if file_path
is not a file
:param file_path: str, path to the file
:return: True if file exists, False if file does not exist
### Response:
def file_is_present(self, file_path):
"""
check if file 'file_path' is present, raises IOError if file_path
is not a file
:param file_path: str, path to the file
:return: True if file exists, False if file does not exist
"""
real_path = self.cont_path(file_path)
if not os.path.exists(real_path):
return False
if not os.path.isfile(real_path):
raise IOError("%s is not a file" % file_path)
return True |
def container_similarity_vector(container1=None,packages_set=None,by=None):
'''container similarity_vector is similar to compare_packages, but intended
to compare a container object (singularity image or singularity hub container)
to a list of packages. If packages_set is not provided, the default used is
'docker-os'. This can be changed to 'docker-library', or if the user wants a custom
list, should define custom_set.
:param container1: singularity image or singularity hub container.
:param packages_set: a name of a package set, provided are docker-os and docker-library
:by: metrics to compare by (files.txt and or folders.txt)
'''
if by == None:
by = ['files.txt']
if not isinstance(by,list):
by = [by]
if not isinstance(packages_set,list):
packages_set = [packages_set]
comparisons = dict()
for b in by:
bot.debug("Starting comparisons for %s" %b)
df = pandas.DataFrame(columns=packages_set)
for package2 in packages_set:
sim = calculate_similarity(container1=container1,
image_package2=package2,
by=b)[b]
name1 = os.path.basename(package2).replace('.img.zip','')
bot.debug("container vs. %s: %s" %(name1,sim))
df.loc["container",package2] = sim
df.columns = [os.path.basename(x).replace('.img.zip','') for x in df.columns.tolist()]
comparisons[b] = df
return comparisons | container similarity_vector is similar to compare_packages, but intended
to compare a container object (singularity image or singularity hub container)
to a list of packages. If packages_set is not provided, the default used is
'docker-os'. This can be changed to 'docker-library', or if the user wants a custom
list, should define custom_set.
:param container1: singularity image or singularity hub container.
:param packages_set: a name of a package set, provided are docker-os and docker-library
:by: metrics to compare by (files.txt and or folders.txt) | Below is the the instruction that describes the task:
### Input:
container similarity_vector is similar to compare_packages, but intended
to compare a container object (singularity image or singularity hub container)
to a list of packages. If packages_set is not provided, the default used is
'docker-os'. This can be changed to 'docker-library', or if the user wants a custom
list, should define custom_set.
:param container1: singularity image or singularity hub container.
:param packages_set: a name of a package set, provided are docker-os and docker-library
:by: metrics to compare by (files.txt and or folders.txt)
### Response:
def container_similarity_vector(container1=None,packages_set=None,by=None):
'''container similarity_vector is similar to compare_packages, but intended
to compare a container object (singularity image or singularity hub container)
to a list of packages. If packages_set is not provided, the default used is
'docker-os'. This can be changed to 'docker-library', or if the user wants a custom
list, should define custom_set.
:param container1: singularity image or singularity hub container.
:param packages_set: a name of a package set, provided are docker-os and docker-library
:by: metrics to compare by (files.txt and or folders.txt)
'''
if by == None:
by = ['files.txt']
if not isinstance(by,list):
by = [by]
if not isinstance(packages_set,list):
packages_set = [packages_set]
comparisons = dict()
for b in by:
bot.debug("Starting comparisons for %s" %b)
df = pandas.DataFrame(columns=packages_set)
for package2 in packages_set:
sim = calculate_similarity(container1=container1,
image_package2=package2,
by=b)[b]
name1 = os.path.basename(package2).replace('.img.zip','')
bot.debug("container vs. %s: %s" %(name1,sim))
df.loc["container",package2] = sim
df.columns = [os.path.basename(x).replace('.img.zip','') for x in df.columns.tolist()]
comparisons[b] = df
return comparisons |
def Gharagheizi_gas(T, MW, Tb, Pc, omega):
r'''Estimates the thermal conductivity of a gas as a function of
temperature using the CSP method of Gharagheizi [1]_. A convoluted
method claiming high-accuracy and using only statistically significant
variable following analalysis.
Requires temperature, molecular weight, boiling temperature and critical
pressure and acentric factor.
.. math::
k = 7.9505\times 10^{-4} + 3.989\times 10^{-5} T
-5.419\times 10^-5 M + 3.989\times 10^{-5} A
A = \frac{\left(2\omega + T - \frac{(2\omega + 3.2825)T}{T_b} + 3.2825\right)}{0.1MP_cT}
\times (3.9752\omega + 0.1 P_c + 1.9876B + 6.5243)^2
Parameters
----------
T : float
Temperature of the fluid [K]
MW: float
Molecular weight of the fluid [g/mol]
Tb : float
Boiling temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor of the fluid [-]
Returns
-------
kg : float
Estimated gas thermal conductivity [W/m/k]
Notes
-----
Pressure is internally converted into 10*kPa but author used correlation with
kPa; overall, errors have been corrected in the presentation of the formula.
This equation was derived with 15927 points and 1574 compounds.
Example value from [1]_ is the first point in the supportinf info, for CH4.
Examples
--------
>>> Gharagheizi_gas(580., 16.04246, 111.66, 4599000.0, 0.0115478000)
0.09594861261873211
References
----------
.. [1] Gharagheizi, Farhad, Poorandokht Ilani-Kashkouli, Mehdi Sattari,
Amir H. Mohammadi, Deresh Ramjugernath, and Dominique Richon.
"Development of a General Model for Determination of Thermal
Conductivity of Liquid Chemical Compounds at Atmospheric Pressure."
AIChE Journal 59, no. 5 (May 1, 2013): 1702-8. doi:10.1002/aic.13938
'''
Pc = Pc/1E4
B = T + (2.*omega + 2.*T - 2.*T*(2.*omega + 3.2825)/Tb + 3.2825)/(2*omega + T - T*(2*omega+3.2825)/Tb + 3.2825) - T*(2*omega+3.2825)/Tb
A = (2*omega + T - T*(2*omega + 3.2825)/Tb + 3.2825)/(0.1*MW*Pc*T) * (3.9752*omega + 0.1*Pc + 1.9876*B + 6.5243)**2
return 7.9505E-4 + 3.989E-5*T - 5.419E-5*MW + 3.989E-5*A | r'''Estimates the thermal conductivity of a gas as a function of
temperature using the CSP method of Gharagheizi [1]_. A convoluted
method claiming high-accuracy and using only statistically significant
variable following analalysis.
Requires temperature, molecular weight, boiling temperature and critical
pressure and acentric factor.
.. math::
k = 7.9505\times 10^{-4} + 3.989\times 10^{-5} T
-5.419\times 10^-5 M + 3.989\times 10^{-5} A
A = \frac{\left(2\omega + T - \frac{(2\omega + 3.2825)T}{T_b} + 3.2825\right)}{0.1MP_cT}
\times (3.9752\omega + 0.1 P_c + 1.9876B + 6.5243)^2
Parameters
----------
T : float
Temperature of the fluid [K]
MW: float
Molecular weight of the fluid [g/mol]
Tb : float
Boiling temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor of the fluid [-]
Returns
-------
kg : float
Estimated gas thermal conductivity [W/m/k]
Notes
-----
Pressure is internally converted into 10*kPa but author used correlation with
kPa; overall, errors have been corrected in the presentation of the formula.
This equation was derived with 15927 points and 1574 compounds.
Example value from [1]_ is the first point in the supportinf info, for CH4.
Examples
--------
>>> Gharagheizi_gas(580., 16.04246, 111.66, 4599000.0, 0.0115478000)
0.09594861261873211
References
----------
.. [1] Gharagheizi, Farhad, Poorandokht Ilani-Kashkouli, Mehdi Sattari,
Amir H. Mohammadi, Deresh Ramjugernath, and Dominique Richon.
"Development of a General Model for Determination of Thermal
Conductivity of Liquid Chemical Compounds at Atmospheric Pressure."
AIChE Journal 59, no. 5 (May 1, 2013): 1702-8. doi:10.1002/aic.13938 | Below is the the instruction that describes the task:
### Input:
r'''Estimates the thermal conductivity of a gas as a function of
temperature using the CSP method of Gharagheizi [1]_. A convoluted
method claiming high-accuracy and using only statistically significant
variable following analalysis.
Requires temperature, molecular weight, boiling temperature and critical
pressure and acentric factor.
.. math::
k = 7.9505\times 10^{-4} + 3.989\times 10^{-5} T
-5.419\times 10^-5 M + 3.989\times 10^{-5} A
A = \frac{\left(2\omega + T - \frac{(2\omega + 3.2825)T}{T_b} + 3.2825\right)}{0.1MP_cT}
\times (3.9752\omega + 0.1 P_c + 1.9876B + 6.5243)^2
Parameters
----------
T : float
Temperature of the fluid [K]
MW: float
Molecular weight of the fluid [g/mol]
Tb : float
Boiling temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor of the fluid [-]
Returns
-------
kg : float
Estimated gas thermal conductivity [W/m/k]
Notes
-----
Pressure is internally converted into 10*kPa but author used correlation with
kPa; overall, errors have been corrected in the presentation of the formula.
This equation was derived with 15927 points and 1574 compounds.
Example value from [1]_ is the first point in the supportinf info, for CH4.
Examples
--------
>>> Gharagheizi_gas(580., 16.04246, 111.66, 4599000.0, 0.0115478000)
0.09594861261873211
References
----------
.. [1] Gharagheizi, Farhad, Poorandokht Ilani-Kashkouli, Mehdi Sattari,
Amir H. Mohammadi, Deresh Ramjugernath, and Dominique Richon.
"Development of a General Model for Determination of Thermal
Conductivity of Liquid Chemical Compounds at Atmospheric Pressure."
AIChE Journal 59, no. 5 (May 1, 2013): 1702-8. doi:10.1002/aic.13938
### Response:
def Gharagheizi_gas(T, MW, Tb, Pc, omega):
r'''Estimates the thermal conductivity of a gas as a function of
temperature using the CSP method of Gharagheizi [1]_. A convoluted
method claiming high-accuracy and using only statistically significant
variable following analalysis.
Requires temperature, molecular weight, boiling temperature and critical
pressure and acentric factor.
.. math::
k = 7.9505\times 10^{-4} + 3.989\times 10^{-5} T
-5.419\times 10^-5 M + 3.989\times 10^{-5} A
A = \frac{\left(2\omega + T - \frac{(2\omega + 3.2825)T}{T_b} + 3.2825\right)}{0.1MP_cT}
\times (3.9752\omega + 0.1 P_c + 1.9876B + 6.5243)^2
Parameters
----------
T : float
Temperature of the fluid [K]
MW: float
Molecular weight of the fluid [g/mol]
Tb : float
Boiling temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor of the fluid [-]
Returns
-------
kg : float
Estimated gas thermal conductivity [W/m/k]
Notes
-----
Pressure is internally converted into 10*kPa but author used correlation with
kPa; overall, errors have been corrected in the presentation of the formula.
This equation was derived with 15927 points and 1574 compounds.
Example value from [1]_ is the first point in the supportinf info, for CH4.
Examples
--------
>>> Gharagheizi_gas(580., 16.04246, 111.66, 4599000.0, 0.0115478000)
0.09594861261873211
References
----------
.. [1] Gharagheizi, Farhad, Poorandokht Ilani-Kashkouli, Mehdi Sattari,
Amir H. Mohammadi, Deresh Ramjugernath, and Dominique Richon.
"Development of a General Model for Determination of Thermal
Conductivity of Liquid Chemical Compounds at Atmospheric Pressure."
AIChE Journal 59, no. 5 (May 1, 2013): 1702-8. doi:10.1002/aic.13938
'''
Pc = Pc/1E4
B = T + (2.*omega + 2.*T - 2.*T*(2.*omega + 3.2825)/Tb + 3.2825)/(2*omega + T - T*(2*omega+3.2825)/Tb + 3.2825) - T*(2*omega+3.2825)/Tb
A = (2*omega + T - T*(2*omega + 3.2825)/Tb + 3.2825)/(0.1*MW*Pc*T) * (3.9752*omega + 0.1*Pc + 1.9876*B + 6.5243)**2
return 7.9505E-4 + 3.989E-5*T - 5.419E-5*MW + 3.989E-5*A |
def securityEventSSE(symbols=None, on_data=None, token='', version=''):
'''The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version
'''
return _runSSE('security-event', symbols, on_data, token, version) | The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version | Below is the the instruction that describes the task:
### Input:
The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version
### Response:
def securityEventSSE(symbols=None, on_data=None, token='', version=''):
'''The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version
'''
return _runSSE('security-event', symbols, on_data, token, version) |
def kitchen_list(backend):
"""
List all Kitchens
"""
click.echo(click.style('%s - Getting the list of kitchens' % get_datetime(), fg='green'))
check_and_print(DKCloudCommandRunner.list_kitchen(backend.dki)) | List all Kitchens | Below is the the instruction that describes the task:
### Input:
List all Kitchens
### Response:
def kitchen_list(backend):
"""
List all Kitchens
"""
click.echo(click.style('%s - Getting the list of kitchens' % get_datetime(), fg='green'))
check_and_print(DKCloudCommandRunner.list_kitchen(backend.dki)) |
def start(self):
"""Start the timer."""
if not self._is_running:
self._t_start = time()
self._is_running = True
self._t_last = time() | Start the timer. | Below is the the instruction that describes the task:
### Input:
Start the timer.
### Response:
def start(self):
"""Start the timer."""
if not self._is_running:
self._t_start = time()
self._is_running = True
self._t_last = time() |
def read_stack_dwords(self, count, offset = 0):
"""
Reads DWORDs from the top of the stack.
@type count: int
@param count: Number of DWORDs to read.
@type offset: int
@param offset: Offset from the stack pointer to begin reading.
@rtype: tuple( int... )
@return: Tuple of integers read from the stack.
@raise WindowsError: Could not read the requested data.
"""
if count > 0:
stackData = self.read_stack_data(count * 4, offset)
return struct.unpack('<'+('L'*count), stackData)
return () | Reads DWORDs from the top of the stack.
@type count: int
@param count: Number of DWORDs to read.
@type offset: int
@param offset: Offset from the stack pointer to begin reading.
@rtype: tuple( int... )
@return: Tuple of integers read from the stack.
@raise WindowsError: Could not read the requested data. | Below is the the instruction that describes the task:
### Input:
Reads DWORDs from the top of the stack.
@type count: int
@param count: Number of DWORDs to read.
@type offset: int
@param offset: Offset from the stack pointer to begin reading.
@rtype: tuple( int... )
@return: Tuple of integers read from the stack.
@raise WindowsError: Could not read the requested data.
### Response:
def read_stack_dwords(self, count, offset = 0):
"""
Reads DWORDs from the top of the stack.
@type count: int
@param count: Number of DWORDs to read.
@type offset: int
@param offset: Offset from the stack pointer to begin reading.
@rtype: tuple( int... )
@return: Tuple of integers read from the stack.
@raise WindowsError: Could not read the requested data.
"""
if count > 0:
stackData = self.read_stack_data(count * 4, offset)
return struct.unpack('<'+('L'*count), stackData)
return () |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.