code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def decode_terminated(data, encoding, strict=True):
"""Returns the decoded data until the first NULL terminator
and all data after it.
Args:
data (bytes): data to decode
encoding (str): The codec to use
strict (bool): If True will raise ValueError in case no NULL is found
but the available data decoded successfully.
Returns:
Tuple[`text`, `bytes`]: A tuple containing the decoded text and the
remaining data after the found NULL termination.
Raises:
UnicodeError: In case the data can't be decoded.
LookupError:In case the encoding is not found.
ValueError: In case the data isn't null terminated (even if it is
encoded correctly) except if strict is False, then the decoded
string will be returned anyway.
"""
codec_info = codecs.lookup(encoding)
# normalize encoding name so we can compare by name
encoding = codec_info.name
# fast path
if encoding in ("utf-8", "iso8859-1"):
index = data.find(b"\x00")
if index == -1:
# make sure we raise UnicodeError first, like in the slow path
res = data.decode(encoding), b""
if strict:
raise ValueError("not null terminated")
else:
return res
return data[:index].decode(encoding), data[index + 1:]
# slow path
decoder = codec_info.incrementaldecoder()
r = []
for i, b in enumerate(iterbytes(data)):
c = decoder.decode(b)
if c == u"\x00":
return u"".join(r), data[i + 1:]
r.append(c)
else:
# make sure the decoder is finished
r.append(decoder.decode(b"", True))
if strict:
raise ValueError("not null terminated")
return u"".join(r), b"" | Returns the decoded data until the first NULL terminator
and all data after it.
Args:
data (bytes): data to decode
encoding (str): The codec to use
strict (bool): If True will raise ValueError in case no NULL is found
but the available data decoded successfully.
Returns:
Tuple[`text`, `bytes`]: A tuple containing the decoded text and the
remaining data after the found NULL termination.
Raises:
UnicodeError: In case the data can't be decoded.
LookupError:In case the encoding is not found.
ValueError: In case the data isn't null terminated (even if it is
encoded correctly) except if strict is False, then the decoded
string will be returned anyway. | Below is the the instruction that describes the task:
### Input:
Returns the decoded data until the first NULL terminator
and all data after it.
Args:
data (bytes): data to decode
encoding (str): The codec to use
strict (bool): If True will raise ValueError in case no NULL is found
but the available data decoded successfully.
Returns:
Tuple[`text`, `bytes`]: A tuple containing the decoded text and the
remaining data after the found NULL termination.
Raises:
UnicodeError: In case the data can't be decoded.
LookupError:In case the encoding is not found.
ValueError: In case the data isn't null terminated (even if it is
encoded correctly) except if strict is False, then the decoded
string will be returned anyway.
### Response:
def decode_terminated(data, encoding, strict=True):
"""Returns the decoded data until the first NULL terminator
and all data after it.
Args:
data (bytes): data to decode
encoding (str): The codec to use
strict (bool): If True will raise ValueError in case no NULL is found
but the available data decoded successfully.
Returns:
Tuple[`text`, `bytes`]: A tuple containing the decoded text and the
remaining data after the found NULL termination.
Raises:
UnicodeError: In case the data can't be decoded.
LookupError:In case the encoding is not found.
ValueError: In case the data isn't null terminated (even if it is
encoded correctly) except if strict is False, then the decoded
string will be returned anyway.
"""
codec_info = codecs.lookup(encoding)
# normalize encoding name so we can compare by name
encoding = codec_info.name
# fast path
if encoding in ("utf-8", "iso8859-1"):
index = data.find(b"\x00")
if index == -1:
# make sure we raise UnicodeError first, like in the slow path
res = data.decode(encoding), b""
if strict:
raise ValueError("not null terminated")
else:
return res
return data[:index].decode(encoding), data[index + 1:]
# slow path
decoder = codec_info.incrementaldecoder()
r = []
for i, b in enumerate(iterbytes(data)):
c = decoder.decode(b)
if c == u"\x00":
return u"".join(r), data[i + 1:]
r.append(c)
else:
# make sure the decoder is finished
r.append(decoder.decode(b"", True))
if strict:
raise ValueError("not null terminated")
return u"".join(r), b"" |
def create_authn_query_response(self, subject, session_index=None,
requested_context=None, in_response_to=None,
issuer=None, sign_response=False,
status=None, sign_alg=None, digest_alg=None,
**kwargs):
"""
A successful <Response> will contain one or more assertions containing
authentication statements.
:return:
"""
margs = self.message_args()
asserts = []
for statement in self.session_db.get_authn_statements(
subject.name_id, session_index, requested_context):
asserts.append(saml.Assertion(authn_statement=statement,
subject=subject, **margs))
if asserts:
args = {"assertion": asserts}
else:
args = {}
return self._response(in_response_to, "", status, issuer,
sign_response, to_sign=[], sign_alg=sign_alg,
digest_alg=digest_alg, **args) | A successful <Response> will contain one or more assertions containing
authentication statements.
:return: | Below is the the instruction that describes the task:
### Input:
A successful <Response> will contain one or more assertions containing
authentication statements.
:return:
### Response:
def create_authn_query_response(self, subject, session_index=None,
requested_context=None, in_response_to=None,
issuer=None, sign_response=False,
status=None, sign_alg=None, digest_alg=None,
**kwargs):
"""
A successful <Response> will contain one or more assertions containing
authentication statements.
:return:
"""
margs = self.message_args()
asserts = []
for statement in self.session_db.get_authn_statements(
subject.name_id, session_index, requested_context):
asserts.append(saml.Assertion(authn_statement=statement,
subject=subject, **margs))
if asserts:
args = {"assertion": asserts}
else:
args = {}
return self._response(in_response_to, "", status, issuer,
sign_response, to_sign=[], sign_alg=sign_alg,
digest_alg=digest_alg, **args) |
def predict(
self, user_ids, item_ids, item_features=None, user_features=None, num_threads=1
):
"""
Compute the recommendation score for user-item pairs.
For details on how to use feature matrices, see the documentation
on the :class:`lightfm.LightFM` class.
Arguments
---------
user_ids: integer or np.int32 array of shape [n_pairs,]
single user id or an array containing the user ids for the
user-item pairs for which a prediction is to be computed. Note
that these are LightFM's internal id's, i.e. the index of the
user in the interaction matrix used for fitting the model.
item_ids: np.int32 array of shape [n_pairs,]
an array containing the item ids for the user-item pairs for which
a prediction is to be computed. Note that these are LightFM's
internal id's, i.e. the index of the item in the interaction
matrix used for fitting the model.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
Returns
-------
np.float32 array of shape [n_pairs,]
Numpy array containing the recommendation scores for pairs defined
by the inputs.
"""
self._check_initialized()
if not isinstance(user_ids, np.ndarray):
user_ids = np.repeat(np.int32(user_ids), len(item_ids))
if isinstance(item_ids, (list, tuple)):
item_ids = np.array(item_ids, dtype=np.int32)
assert len(user_ids) == len(item_ids)
if user_ids.dtype != np.int32:
user_ids = user_ids.astype(np.int32)
if item_ids.dtype != np.int32:
item_ids = item_ids.astype(np.int32)
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
if user_ids.min() < 0 or item_ids.min() < 0:
raise ValueError(
"User or item ids cannot be negative. "
"Check your inputs for negative numbers "
"or very large numbers that can overflow."
)
n_users = user_ids.max() + 1
n_items = item_ids.max() + 1
(user_features, item_features) = self._construct_feature_matrices(
n_users, n_items, user_features, item_features
)
lightfm_data = self._get_lightfm_data()
predictions = np.empty(len(user_ids), dtype=np.float64)
predict_lightfm(
CSRMatrix(item_features),
CSRMatrix(user_features),
user_ids,
item_ids,
predictions,
lightfm_data,
num_threads,
)
return predictions | Compute the recommendation score for user-item pairs.
For details on how to use feature matrices, see the documentation
on the :class:`lightfm.LightFM` class.
Arguments
---------
user_ids: integer or np.int32 array of shape [n_pairs,]
single user id or an array containing the user ids for the
user-item pairs for which a prediction is to be computed. Note
that these are LightFM's internal id's, i.e. the index of the
user in the interaction matrix used for fitting the model.
item_ids: np.int32 array of shape [n_pairs,]
an array containing the item ids for the user-item pairs for which
a prediction is to be computed. Note that these are LightFM's
internal id's, i.e. the index of the item in the interaction
matrix used for fitting the model.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
Returns
-------
np.float32 array of shape [n_pairs,]
Numpy array containing the recommendation scores for pairs defined
by the inputs. | Below is the the instruction that describes the task:
### Input:
Compute the recommendation score for user-item pairs.
For details on how to use feature matrices, see the documentation
on the :class:`lightfm.LightFM` class.
Arguments
---------
user_ids: integer or np.int32 array of shape [n_pairs,]
single user id or an array containing the user ids for the
user-item pairs for which a prediction is to be computed. Note
that these are LightFM's internal id's, i.e. the index of the
user in the interaction matrix used for fitting the model.
item_ids: np.int32 array of shape [n_pairs,]
an array containing the item ids for the user-item pairs for which
a prediction is to be computed. Note that these are LightFM's
internal id's, i.e. the index of the item in the interaction
matrix used for fitting the model.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
Returns
-------
np.float32 array of shape [n_pairs,]
Numpy array containing the recommendation scores for pairs defined
by the inputs.
### Response:
def predict(
self, user_ids, item_ids, item_features=None, user_features=None, num_threads=1
):
"""
Compute the recommendation score for user-item pairs.
For details on how to use feature matrices, see the documentation
on the :class:`lightfm.LightFM` class.
Arguments
---------
user_ids: integer or np.int32 array of shape [n_pairs,]
single user id or an array containing the user ids for the
user-item pairs for which a prediction is to be computed. Note
that these are LightFM's internal id's, i.e. the index of the
user in the interaction matrix used for fitting the model.
item_ids: np.int32 array of shape [n_pairs,]
an array containing the item ids for the user-item pairs for which
a prediction is to be computed. Note that these are LightFM's
internal id's, i.e. the index of the item in the interaction
matrix used for fitting the model.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
Returns
-------
np.float32 array of shape [n_pairs,]
Numpy array containing the recommendation scores for pairs defined
by the inputs.
"""
self._check_initialized()
if not isinstance(user_ids, np.ndarray):
user_ids = np.repeat(np.int32(user_ids), len(item_ids))
if isinstance(item_ids, (list, tuple)):
item_ids = np.array(item_ids, dtype=np.int32)
assert len(user_ids) == len(item_ids)
if user_ids.dtype != np.int32:
user_ids = user_ids.astype(np.int32)
if item_ids.dtype != np.int32:
item_ids = item_ids.astype(np.int32)
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
if user_ids.min() < 0 or item_ids.min() < 0:
raise ValueError(
"User or item ids cannot be negative. "
"Check your inputs for negative numbers "
"or very large numbers that can overflow."
)
n_users = user_ids.max() + 1
n_items = item_ids.max() + 1
(user_features, item_features) = self._construct_feature_matrices(
n_users, n_items, user_features, item_features
)
lightfm_data = self._get_lightfm_data()
predictions = np.empty(len(user_ids), dtype=np.float64)
predict_lightfm(
CSRMatrix(item_features),
CSRMatrix(user_features),
user_ids,
item_ids,
predictions,
lightfm_data,
num_threads,
)
return predictions |
def decode(decoder, data, length, frame_size, decode_fec, channels=2):
"""Decode an Opus frame
Unlike the `opus_decode` function , this function takes an additional parameter `channels`,
which indicates the number of channels in the frame
"""
pcm_size = frame_size * channels * ctypes.sizeof(ctypes.c_int16)
pcm = (ctypes.c_int16 * pcm_size)()
pcm_pointer = ctypes.cast(pcm, c_int16_pointer)
# Converting from a boolean to int
decode_fec = int(bool(decode_fec))
result = _decode(decoder, data, length, pcm_pointer, frame_size, decode_fec)
if result < 0:
raise OpusError(result)
return array.array('h', pcm).tostring() | Decode an Opus frame
Unlike the `opus_decode` function , this function takes an additional parameter `channels`,
which indicates the number of channels in the frame | Below is the the instruction that describes the task:
### Input:
Decode an Opus frame
Unlike the `opus_decode` function , this function takes an additional parameter `channels`,
which indicates the number of channels in the frame
### Response:
def decode(decoder, data, length, frame_size, decode_fec, channels=2):
"""Decode an Opus frame
Unlike the `opus_decode` function , this function takes an additional parameter `channels`,
which indicates the number of channels in the frame
"""
pcm_size = frame_size * channels * ctypes.sizeof(ctypes.c_int16)
pcm = (ctypes.c_int16 * pcm_size)()
pcm_pointer = ctypes.cast(pcm, c_int16_pointer)
# Converting from a boolean to int
decode_fec = int(bool(decode_fec))
result = _decode(decoder, data, length, pcm_pointer, frame_size, decode_fec)
if result < 0:
raise OpusError(result)
return array.array('h', pcm).tostring() |
def log_url (self, url_data):
"""
Put invalid url in blacklist, delete valid url from blacklist.
"""
key = (url_data.parent_url, url_data.cache_url)
key = repr(key)
if key in self.blacklist:
if url_data.valid:
del self.blacklist[key]
else:
self.blacklist[key] += 1
else:
if not url_data.valid:
self.blacklist[key] = 1 | Put invalid url in blacklist, delete valid url from blacklist. | Below is the the instruction that describes the task:
### Input:
Put invalid url in blacklist, delete valid url from blacklist.
### Response:
def log_url (self, url_data):
"""
Put invalid url in blacklist, delete valid url from blacklist.
"""
key = (url_data.parent_url, url_data.cache_url)
key = repr(key)
if key in self.blacklist:
if url_data.valid:
del self.blacklist[key]
else:
self.blacklist[key] += 1
else:
if not url_data.valid:
self.blacklist[key] = 1 |
def emit(self, event, *args, **kwargs):
"""Use this to send a structured event, with a name and arguments, to
the client.
By default, it uses this namespace's endpoint. You can send messages on
other endpoints with something like:
``self.socket['/other_endpoint'].emit()``.
However, it is possible that the ``'/other_endpoint'`` was not
initialized yet, and that would yield a ``KeyError``.
The only supported ``kwargs`` is ``callback``. All other parameters
must be passed positionally.
:param event: The name of the event to trigger on the other end.
:param callback: Pass in the callback keyword argument to define a
call-back that will be called when the client acks.
This callback is slightly different from the one from
``send()``, as this callback will receive parameters
from the explicit call of the ``ack()`` function
passed to the listener on the client side.
The remote listener will need to explicitly ack (by
calling its last argument, a function which is
usually called 'ack') with some parameters indicating
success or error. The 'ack' packet coming back here
will then trigger the callback function with the
returned values.
:type callback: callable
"""
callback = kwargs.pop('callback', None)
if kwargs:
raise ValueError(
"emit() only supports positional argument, to stay "
"compatible with the Socket.IO protocol. You can "
"however pass in a dictionary as the first argument")
pkt = dict(type="event", name=event, args=args,
endpoint=self.ns_name)
if callback:
# By passing 'data', we indicate that we *want* an explicit ack
# by the client code, not an automatic as with send().
pkt['ack'] = 'data'
pkt['id'] = msgid = self.socket._get_next_msgid()
self.socket._save_ack_callback(msgid, callback)
self.socket.send_packet(pkt) | Use this to send a structured event, with a name and arguments, to
the client.
By default, it uses this namespace's endpoint. You can send messages on
other endpoints with something like:
``self.socket['/other_endpoint'].emit()``.
However, it is possible that the ``'/other_endpoint'`` was not
initialized yet, and that would yield a ``KeyError``.
The only supported ``kwargs`` is ``callback``. All other parameters
must be passed positionally.
:param event: The name of the event to trigger on the other end.
:param callback: Pass in the callback keyword argument to define a
call-back that will be called when the client acks.
This callback is slightly different from the one from
``send()``, as this callback will receive parameters
from the explicit call of the ``ack()`` function
passed to the listener on the client side.
The remote listener will need to explicitly ack (by
calling its last argument, a function which is
usually called 'ack') with some parameters indicating
success or error. The 'ack' packet coming back here
will then trigger the callback function with the
returned values.
:type callback: callable | Below is the the instruction that describes the task:
### Input:
Use this to send a structured event, with a name and arguments, to
the client.
By default, it uses this namespace's endpoint. You can send messages on
other endpoints with something like:
``self.socket['/other_endpoint'].emit()``.
However, it is possible that the ``'/other_endpoint'`` was not
initialized yet, and that would yield a ``KeyError``.
The only supported ``kwargs`` is ``callback``. All other parameters
must be passed positionally.
:param event: The name of the event to trigger on the other end.
:param callback: Pass in the callback keyword argument to define a
call-back that will be called when the client acks.
This callback is slightly different from the one from
``send()``, as this callback will receive parameters
from the explicit call of the ``ack()`` function
passed to the listener on the client side.
The remote listener will need to explicitly ack (by
calling its last argument, a function which is
usually called 'ack') with some parameters indicating
success or error. The 'ack' packet coming back here
will then trigger the callback function with the
returned values.
:type callback: callable
### Response:
def emit(self, event, *args, **kwargs):
"""Use this to send a structured event, with a name and arguments, to
the client.
By default, it uses this namespace's endpoint. You can send messages on
other endpoints with something like:
``self.socket['/other_endpoint'].emit()``.
However, it is possible that the ``'/other_endpoint'`` was not
initialized yet, and that would yield a ``KeyError``.
The only supported ``kwargs`` is ``callback``. All other parameters
must be passed positionally.
:param event: The name of the event to trigger on the other end.
:param callback: Pass in the callback keyword argument to define a
call-back that will be called when the client acks.
This callback is slightly different from the one from
``send()``, as this callback will receive parameters
from the explicit call of the ``ack()`` function
passed to the listener on the client side.
The remote listener will need to explicitly ack (by
calling its last argument, a function which is
usually called 'ack') with some parameters indicating
success or error. The 'ack' packet coming back here
will then trigger the callback function with the
returned values.
:type callback: callable
"""
callback = kwargs.pop('callback', None)
if kwargs:
raise ValueError(
"emit() only supports positional argument, to stay "
"compatible with the Socket.IO protocol. You can "
"however pass in a dictionary as the first argument")
pkt = dict(type="event", name=event, args=args,
endpoint=self.ns_name)
if callback:
# By passing 'data', we indicate that we *want* an explicit ack
# by the client code, not an automatic as with send().
pkt['ack'] = 'data'
pkt['id'] = msgid = self.socket._get_next_msgid()
self.socket._save_ack_callback(msgid, callback)
self.socket.send_packet(pkt) |
def _ToJSonObj(self, columns_order=None, order_by=()):
"""Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
"""
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Creating the column JSON objects
col_objs = []
for col_id in columns_order:
col_obj = {"id": col_dict[col_id]["id"],
"label": col_dict[col_id]["label"],
"type": col_dict[col_id]["type"]}
if col_dict[col_id]["custom_properties"]:
col_obj["p"] = col_dict[col_id]["custom_properties"]
col_objs.append(col_obj)
# Creating the rows jsons
row_objs = []
for row, cp in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
if value is None:
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {"v": value[0]}
if len(value) > 1 and value[1] is not None:
cell_obj["f"] = value[1]
if len(value) == 3:
cell_obj["p"] = value[2]
else:
cell_obj = {"v": value}
cell_objs.append(cell_obj)
row_obj = {"c": cell_objs}
if cp:
row_obj["p"] = cp
row_objs.append(row_obj)
json_obj = {"cols": col_objs, "rows": row_objs}
if self.custom_properties:
json_obj["p"] = self.custom_properties
return json_obj | Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse. | Below is the the instruction that describes the task:
### Input:
Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
### Response:
def _ToJSonObj(self, columns_order=None, order_by=()):
"""Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
"""
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Creating the column JSON objects
col_objs = []
for col_id in columns_order:
col_obj = {"id": col_dict[col_id]["id"],
"label": col_dict[col_id]["label"],
"type": col_dict[col_id]["type"]}
if col_dict[col_id]["custom_properties"]:
col_obj["p"] = col_dict[col_id]["custom_properties"]
col_objs.append(col_obj)
# Creating the rows jsons
row_objs = []
for row, cp in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
if value is None:
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {"v": value[0]}
if len(value) > 1 and value[1] is not None:
cell_obj["f"] = value[1]
if len(value) == 3:
cell_obj["p"] = value[2]
else:
cell_obj = {"v": value}
cell_objs.append(cell_obj)
row_obj = {"c": cell_objs}
if cp:
row_obj["p"] = cp
row_objs.append(row_obj)
json_obj = {"cols": col_objs, "rows": row_objs}
if self.custom_properties:
json_obj["p"] = self.custom_properties
return json_obj |
def create(self, schema="{}", **kwargs):
"""Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
kwargs["schema"] = strschema
self.metadata = self.db.create(self.path, kwargs).json() | Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema. | Below is the the instruction that describes the task:
### Input:
Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema.
### Response:
def create(self, schema="{}", **kwargs):
"""Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
kwargs["schema"] = strschema
self.metadata = self.db.create(self.path, kwargs).json() |
def add_item(self, host, key, value, clock=None, state=0):
"""
Add a single item into DataContainer
:host: hostname to which item will be linked to
:key: item key as defined in Zabbix
:value: item value
:clock: timestemp as integer. If not provided self.clock()) will be used
"""
if clock is None:
clock = self.clock
if self._config.data_type == "items":
item = {"host": host, "key": key,
"value": value, "clock": clock, "state": state}
elif self._config.data_type == "lld":
item = {"host": host, "key": key, "clock": clock, "state": state,
"value": json.dumps({"data": value})}
else:
if self.logger: # pragma: no cover
self.logger.error("Setup data_type before adding data")
raise ValueError('Setup data_type before adding data')
self._items_list.append(item) | Add a single item into DataContainer
:host: hostname to which item will be linked to
:key: item key as defined in Zabbix
:value: item value
:clock: timestemp as integer. If not provided self.clock()) will be used | Below is the the instruction that describes the task:
### Input:
Add a single item into DataContainer
:host: hostname to which item will be linked to
:key: item key as defined in Zabbix
:value: item value
:clock: timestemp as integer. If not provided self.clock()) will be used
### Response:
def add_item(self, host, key, value, clock=None, state=0):
"""
Add a single item into DataContainer
:host: hostname to which item will be linked to
:key: item key as defined in Zabbix
:value: item value
:clock: timestemp as integer. If not provided self.clock()) will be used
"""
if clock is None:
clock = self.clock
if self._config.data_type == "items":
item = {"host": host, "key": key,
"value": value, "clock": clock, "state": state}
elif self._config.data_type == "lld":
item = {"host": host, "key": key, "clock": clock, "state": state,
"value": json.dumps({"data": value})}
else:
if self.logger: # pragma: no cover
self.logger.error("Setup data_type before adding data")
raise ValueError('Setup data_type before adding data')
self._items_list.append(item) |
def prepareSystem(cls):
"""
Prepares this system for the downloading and lookup of resources. This method should only
be invoked on a worker node. It is idempotent but not thread-safe.
"""
try:
resourceRootDirPath = os.environ[cls.rootDirPathEnvName]
except KeyError:
# Create directory holding local copies of requested resources ...
resourceRootDirPath = mkdtemp()
# .. and register its location in an environment variable such that child processes
# can find it.
os.environ[cls.rootDirPathEnvName] = resourceRootDirPath
assert os.path.isdir(resourceRootDirPath) | Prepares this system for the downloading and lookup of resources. This method should only
be invoked on a worker node. It is idempotent but not thread-safe. | Below is the the instruction that describes the task:
### Input:
Prepares this system for the downloading and lookup of resources. This method should only
be invoked on a worker node. It is idempotent but not thread-safe.
### Response:
def prepareSystem(cls):
"""
Prepares this system for the downloading and lookup of resources. This method should only
be invoked on a worker node. It is idempotent but not thread-safe.
"""
try:
resourceRootDirPath = os.environ[cls.rootDirPathEnvName]
except KeyError:
# Create directory holding local copies of requested resources ...
resourceRootDirPath = mkdtemp()
# .. and register its location in an environment variable such that child processes
# can find it.
os.environ[cls.rootDirPathEnvName] = resourceRootDirPath
assert os.path.isdir(resourceRootDirPath) |
def get_thellier_gui_meas_mapping(input_df, output=2):
"""
Get the appropriate mapping for translating measurements in Thellier GUI.
This requires special handling for treat_step_num/measurement/measurement_number.
Parameters
----------
input_df : pandas DataFrame
MagIC records
output : int
output to this MagIC data model (2 or 3)
Output
--------
mapping : dict (used in convert_meas_df_thellier_gui)
"""
if int(output) == 2:
thellier_gui_meas3_2_meas2_map = meas_magic3_2_magic2_map.copy()
if 'treat_step_num' in input_df.columns:
thellier_gui_meas3_2_meas2_map.update(
{'treat_step_num': 'measurement_number'})
thellier_gui_meas3_2_meas2_map.pop('measurement')
return thellier_gui_meas3_2_meas2_map
# 2 --> 3
else:
thellier_gui_meas2_2_meas3_map = meas_magic2_2_magic3_map.copy()
if 'measurement' in input_df.columns:
thellier_gui_meas2_2_meas3_map.pop('measurement_number')
try:
res = int(input_df.iloc[0]['measurement_number'])
if res < 100:
thellier_gui_meas2_2_meas3_map['measurement_number'] = 'treat_step_num'
except ValueError as ex:
pass
return thellier_gui_meas2_2_meas3_map | Get the appropriate mapping for translating measurements in Thellier GUI.
This requires special handling for treat_step_num/measurement/measurement_number.
Parameters
----------
input_df : pandas DataFrame
MagIC records
output : int
output to this MagIC data model (2 or 3)
Output
--------
mapping : dict (used in convert_meas_df_thellier_gui) | Below is the instruction that describes the task:
### Input:
Get the appropriate mapping for translating measurements in Thellier GUI.
This requires special handling for treat_step_num/measurement/measurement_number.
Parameters
----------
input_df : pandas DataFrame
MagIC records
output : int
output to this MagIC data model (2 or 3)
Output
--------
mapping : dict (used in convert_meas_df_thellier_gui)
### Response:
def get_thellier_gui_meas_mapping(input_df, output=2):
"""
Get the appropriate mapping for translating measurements in Thellier GUI.
This requires special handling for treat_step_num/measurement/measurement_number.
Parameters
----------
input_df : pandas DataFrame
MagIC records
output : int
output to this MagIC data model (2 or 3)
Output
--------
mapping : dict (used in convert_meas_df_thellier_gui)
"""
if int(output) == 2:
thellier_gui_meas3_2_meas2_map = meas_magic3_2_magic2_map.copy()
if 'treat_step_num' in input_df.columns:
thellier_gui_meas3_2_meas2_map.update(
{'treat_step_num': 'measurement_number'})
thellier_gui_meas3_2_meas2_map.pop('measurement')
return thellier_gui_meas3_2_meas2_map
# 2 --> 3
else:
thellier_gui_meas2_2_meas3_map = meas_magic2_2_magic3_map.copy()
if 'measurement' in input_df.columns:
thellier_gui_meas2_2_meas3_map.pop('measurement_number')
try:
res = int(input_df.iloc[0]['measurement_number'])
if res < 100:
thellier_gui_meas2_2_meas3_map['measurement_number'] = 'treat_step_num'
except ValueError as ex:
pass
return thellier_gui_meas2_2_meas3_map |
def dist01(graph, weight, source=0, target=None):
"""Shortest path in a 0,1 weighted graph
:param graph: directed graph in listlist or listdict format
:param weight: matrix or adjacency dictionary
:param int source: vertex
:param target: exploration stops once distance to target is found
:returns: distance table, predecessor table
:complexity: `O(|V|+|E|)`
"""
n = len(graph)
dist = [float('inf')] * n
prec = [None] * n
black = [False] * n
dist[source] = 0
gray = deque([source])
while gray:
node = gray.pop()
if black[node]:
continue
black[node] = True
if node == target:
break
for neighbor in graph[node]:
ell = dist[node] + weight[node][neighbor]
if black[neighbor] or dist[neighbor] <= ell:
continue
dist[neighbor] = ell
prec[neighbor] = node
if weight[node][neighbor] == 0:
gray.append(neighbor)
else:
gray.appendleft(neighbor)
return dist, prec | Shortest path in a 0,1 weighted graph
:param graph: directed graph in listlist or listdict format
:param weight: matrix or adjacency dictionary
:param int source: vertex
:param target: exploration stops once distance to target is found
:returns: distance table, predecessor table
:complexity: `O(|V|+|E|)` | Below is the instruction that describes the task:
### Input:
Shortest path in a 0,1 weighted graph
:param graph: directed graph in listlist or listdict format
:param weight: matrix or adjacency dictionary
:param int source: vertex
:param target: exploration stops once distance to target is found
:returns: distance table, predecessor table
:complexity: `O(|V|+|E|)`
### Response:
def dist01(graph, weight, source=0, target=None):
"""Shortest path in a 0,1 weighted graph
:param graph: directed graph in listlist or listdict format
:param weight: matrix or adjacency dictionary
:param int source: vertex
:param target: exploration stops once distance to target is found
:returns: distance table, predecessor table
:complexity: `O(|V|+|E|)`
"""
n = len(graph)
dist = [float('inf')] * n
prec = [None] * n
black = [False] * n
dist[source] = 0
gray = deque([source])
while gray:
node = gray.pop()
if black[node]:
continue
black[node] = True
if node == target:
break
for neighbor in graph[node]:
ell = dist[node] + weight[node][neighbor]
if black[neighbor] or dist[neighbor] <= ell:
continue
dist[neighbor] = ell
prec[neighbor] = node
if weight[node][neighbor] == 0:
gray.append(neighbor)
else:
gray.appendleft(neighbor)
return dist, prec |
def follow_link(self, link, **kwargs):
"""Click a link.
:param Tag link: Link to click
:param kwargs: Keyword arguments to `Session::send`
"""
try:
href = link['href']
except KeyError:
raise exceptions.RoboError('Link element must have "href" '
'attribute')
self.open(self._build_url(href), **kwargs) | Click a link.
:param Tag link: Link to click
:param kwargs: Keyword arguments to `Session::send` | Below is the instruction that describes the task:
### Input:
Click a link.
:param Tag link: Link to click
:param kwargs: Keyword arguments to `Session::send`
### Response:
def follow_link(self, link, **kwargs):
"""Click a link.
:param Tag link: Link to click
:param kwargs: Keyword arguments to `Session::send`
"""
try:
href = link['href']
except KeyError:
raise exceptions.RoboError('Link element must have "href" '
'attribute')
self.open(self._build_url(href), **kwargs) |
def _validate_zip(the_zip):
"""Validate zipped data package
"""
datapackage_jsons = [f for f in the_zip.namelist() if f.endswith('datapackage.json')]
if len(datapackage_jsons) != 1:
msg = 'DataPackage must have only one "datapackage.json" (had {n})'
raise exceptions.DataPackageException(msg.format(n=len(datapackage_jsons))) | Validate zipped data package | Below is the the instruction that describes the task:
### Input:
Validate zipped data package
### Response:
def _validate_zip(the_zip):
"""Validate zipped data package
"""
datapackage_jsons = [f for f in the_zip.namelist() if f.endswith('datapackage.json')]
if len(datapackage_jsons) != 1:
msg = 'DataPackage must have only one "datapackage.json" (had {n})'
raise exceptions.DataPackageException(msg.format(n=len(datapackage_jsons))) |
def isHandlerPresent(self, event_name):
"""Check if an event has an handler."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
return self.handlers[event_name] is not None | Check if an event has an handler. | Below is the the instruction that describes the task:
### Input:
Check if an event has an handler.
### Response:
def isHandlerPresent(self, event_name):
"""Check if an event has an handler."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
return self.handlers[event_name] is not None |
def get_http_raw(self, url=None, retry_count=3, headers=None,
request_type='GET', form_data=None):
"""
The function for retrieving a raw HTML result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'text/html'.
request_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults
to 'GET'.
form_data (:obj:`dict`): Optional form POST data.
Returns:
str: The raw data.
Raises:
HTTPLookupError: The HTTP lookup failed.
"""
if headers is None:
headers = {'Accept': 'text/html'}
enc_form_data = None
if form_data:
enc_form_data = urlencode(form_data)
try:
# Py 2 inspection will alert on the encoding arg, no harm done.
enc_form_data = bytes(enc_form_data, encoding='ascii')
except TypeError: # pragma: no cover
pass
try:
# Create the connection for the HTTP query.
log.debug('HTTP query for {0} at {1}'.format(
self.address_str, url))
try:
# Py 2 inspection alert bypassed by using kwargs dict.
conn = Request(url=url, data=enc_form_data, headers=headers,
**{'method': request_type})
except TypeError: # pragma: no cover
conn = Request(url=url, data=enc_form_data, headers=headers)
data = self.opener.open(conn, timeout=self.timeout)
try:
d = data.readall().decode('ascii', 'ignore')
except AttributeError: # pragma: no cover
d = data.read().decode('ascii', 'ignore')
return str(d)
except (URLError, socket.timeout, socket.error) as e:
log.debug('HTTP query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('HTTP query retrying (count: {0})'.format(
str(retry_count)))
return self.get_http_raw(
url=url, retry_count=retry_count - 1, headers=headers,
request_type=request_type, form_data=form_data
)
else:
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
url))
except HTTPLookupError as e: # pragma: no cover
raise e
except Exception: # pragma: no cover
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url)) | The function for retrieving a raw HTML result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'text/html'.
request_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults
to 'GET'.
form_data (:obj:`dict`): Optional form POST data.
Returns:
str: The raw data.
Raises:
HTTPLookupError: The HTTP lookup failed. | Below is the instruction that describes the task:
### Input:
The function for retrieving a raw HTML result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'text/html'.
request_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults
to 'GET'.
form_data (:obj:`dict`): Optional form POST data.
Returns:
str: The raw data.
Raises:
HTTPLookupError: The HTTP lookup failed.
### Response:
def get_http_raw(self, url=None, retry_count=3, headers=None,
request_type='GET', form_data=None):
"""
The function for retrieving a raw HTML result via HTTP.
Args:
url (:obj:`str`): The URL to retrieve (required).
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
headers (:obj:`dict`): The HTTP headers. The Accept header
defaults to 'text/html'.
request_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults
to 'GET'.
form_data (:obj:`dict`): Optional form POST data.
Returns:
str: The raw data.
Raises:
HTTPLookupError: The HTTP lookup failed.
"""
if headers is None:
headers = {'Accept': 'text/html'}
enc_form_data = None
if form_data:
enc_form_data = urlencode(form_data)
try:
# Py 2 inspection will alert on the encoding arg, no harm done.
enc_form_data = bytes(enc_form_data, encoding='ascii')
except TypeError: # pragma: no cover
pass
try:
# Create the connection for the HTTP query.
log.debug('HTTP query for {0} at {1}'.format(
self.address_str, url))
try:
# Py 2 inspection alert bypassed by using kwargs dict.
conn = Request(url=url, data=enc_form_data, headers=headers,
**{'method': request_type})
except TypeError: # pragma: no cover
conn = Request(url=url, data=enc_form_data, headers=headers)
data = self.opener.open(conn, timeout=self.timeout)
try:
d = data.readall().decode('ascii', 'ignore')
except AttributeError: # pragma: no cover
d = data.read().decode('ascii', 'ignore')
return str(d)
except (URLError, socket.timeout, socket.error) as e:
log.debug('HTTP query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('HTTP query retrying (count: {0})'.format(
str(retry_count)))
return self.get_http_raw(
url=url, retry_count=retry_count - 1, headers=headers,
request_type=request_type, form_data=form_data
)
else:
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
url))
except HTTPLookupError as e: # pragma: no cover
raise e
except Exception: # pragma: no cover
raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url)) |
def do_work(self, actions_queue, returns_queue, control_queue=None): # pragma: no cover
"""Main function of the worker.
* Get checks
* Launch new checks
* Manage finished checks
:param actions_queue: Global Queue Master->Slave
:type actions_queue: Queue.Queue
:param returns_queue: queue managed by manager
:type returns_queue: Queue.Queue
:return: None
"""
# restore default signal handler for the workers:
# signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.interrupted = False
self.set_exit_handler()
setproctitle("alignak-%s worker %s" % (self.loaded_into, self._id))
timeout = 1.0
self.checks = []
self.t_each_loop = time.time()
while True:
begin = time.time()
logger.debug("--- loop begin: %s", begin)
# If we are dying (big problem!) we do not
# take new jobs, we just finished the current one
if not self.i_am_dying:
# REF: doc/alignak-action-queues.png (3)
self.get_new_checks(actions_queue, returns_queue)
# REF: doc/alignak-action-queues.png (4)
self.launch_new_checks()
# REF: doc/alignak-action-queues.png (5)
self.manage_finished_checks(returns_queue)
logger.debug("loop middle, %d checks", len(self.checks))
# Now get order from master, if any...
if control_queue:
try:
control_message = control_queue.get_nowait()
logger.info("[%s] Got a message: %s", self._id, control_message)
if control_message and control_message.get_type() == 'Die':
logger.info("[%s] The master said we must die... :(", self._id)
break
except Full:
logger.warning("Worker control queue is full")
except Empty:
pass
except Exception as exp: # pylint: disable=broad-except
logger.error("Exception when getting master orders: %s. ", str(exp))
# Maybe someone asked us to die, if so, do it :)
if self.interrupted:
logger.info("I die because someone asked ;)")
break
# Look if we are dying, and if we finish all current checks
# if so, we really die, our master poller will launch a new
# worker because we were too weak to manage our job :(
if not self.checks and self.i_am_dying:
logger.warning("I die because I cannot do my job as I should "
"(too many open files?)... forgive me please.")
break
# Manage a possible time change (our avant will be change with the diff)
diff = self.check_for_system_time_change()
begin += diff
logger.debug("loop check timechange: %s", diff)
timeout -= time.time() - begin
if timeout < 0:
timeout = 1.0
else:
time.sleep(0.1)
logger.debug("+++ loop end: timeout = %s, idle: %s, checks: %d, "
"actions (got: %d, launched: %d, finished: %d)",
timeout, self._idletime, len(self.checks),
self.actions_got, self.actions_launched, self.actions_finished) | Main function of the worker.
* Get checks
* Launch new checks
* Manage finished checks
:param actions_queue: Global Queue Master->Slave
:type actions_queue: Queue.Queue
:param returns_queue: queue managed by manager
:type returns_queue: Queue.Queue
:return: None | Below is the instruction that describes the task:
### Input:
Main function of the worker.
* Get checks
* Launch new checks
* Manage finished checks
:param actions_queue: Global Queue Master->Slave
:type actions_queue: Queue.Queue
:param returns_queue: queue managed by manager
:type returns_queue: Queue.Queue
:return: None
### Response:
def do_work(self, actions_queue, returns_queue, control_queue=None): # pragma: no cover
"""Main function of the worker.
* Get checks
* Launch new checks
* Manage finished checks
:param actions_queue: Global Queue Master->Slave
:type actions_queue: Queue.Queue
:param returns_queue: queue managed by manager
:type returns_queue: Queue.Queue
:return: None
"""
# restore default signal handler for the workers:
# signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.interrupted = False
self.set_exit_handler()
setproctitle("alignak-%s worker %s" % (self.loaded_into, self._id))
timeout = 1.0
self.checks = []
self.t_each_loop = time.time()
while True:
begin = time.time()
logger.debug("--- loop begin: %s", begin)
# If we are dying (big problem!) we do not
# take new jobs, we just finished the current one
if not self.i_am_dying:
# REF: doc/alignak-action-queues.png (3)
self.get_new_checks(actions_queue, returns_queue)
# REF: doc/alignak-action-queues.png (4)
self.launch_new_checks()
# REF: doc/alignak-action-queues.png (5)
self.manage_finished_checks(returns_queue)
logger.debug("loop middle, %d checks", len(self.checks))
# Now get order from master, if any...
if control_queue:
try:
control_message = control_queue.get_nowait()
logger.info("[%s] Got a message: %s", self._id, control_message)
if control_message and control_message.get_type() == 'Die':
logger.info("[%s] The master said we must die... :(", self._id)
break
except Full:
logger.warning("Worker control queue is full")
except Empty:
pass
except Exception as exp: # pylint: disable=broad-except
logger.error("Exception when getting master orders: %s. ", str(exp))
# Maybe someone asked us to die, if so, do it :)
if self.interrupted:
logger.info("I die because someone asked ;)")
break
# Look if we are dying, and if we finish all current checks
# if so, we really die, our master poller will launch a new
# worker because we were too weak to manage our job :(
if not self.checks and self.i_am_dying:
logger.warning("I die because I cannot do my job as I should "
"(too many open files?)... forgive me please.")
break
# Manage a possible time change (our avant will be change with the diff)
diff = self.check_for_system_time_change()
begin += diff
logger.debug("loop check timechange: %s", diff)
timeout -= time.time() - begin
if timeout < 0:
timeout = 1.0
else:
time.sleep(0.1)
logger.debug("+++ loop end: timeout = %s, idle: %s, checks: %d, "
"actions (got: %d, launched: %d, finished: %d)",
timeout, self._idletime, len(self.checks),
self.actions_got, self.actions_launched, self.actions_finished) |
def max_run_length(x: np.ndarray, val: int):
"""Finds the maximum continuous length of the given value in the sequence"""
if x.size == 0:
return 0
else:
y = np.array(x[1:] != x[:-1])
i = np.append(np.where(y), len(x) - 1)
run_lengths = np.diff(np.append(-1, i))
run_length_values = x[i]
return max([rl for rl, v in zip(run_lengths, run_length_values) if v == val], default=0) | Finds the maximum continuous length of the given value in the sequence | Below is the the instruction that describes the task:
### Input:
Finds the maximum continuous length of the given value in the sequence
### Response:
def max_run_length(x: np.ndarray, val: int):
"""Finds the maximum continuous length of the given value in the sequence"""
if x.size == 0:
return 0
else:
y = np.array(x[1:] != x[:-1])
i = np.append(np.where(y), len(x) - 1)
run_lengths = np.diff(np.append(-1, i))
run_length_values = x[i]
return max([rl for rl, v in zip(run_lengths, run_length_values) if v == val], default=0) |
def compute(chart):
""" Computes the behavior. """
factors = []
# Planets in House1 or Conjunct Asc
house1 = chart.getHouse(const.HOUSE1)
planetsHouse1 = chart.objects.getObjectsInHouse(house1)
asc = chart.getAngle(const.ASC)
planetsConjAsc = chart.objects.getObjectsAspecting(asc, [0])
_set = _merge(planetsHouse1, planetsConjAsc)
factors.append(['Planets in House1 or Conj Asc', _set])
# Planets conjunct Moon or Mercury
moon = chart.get(const.MOON)
mercury = chart.get(const.MERCURY)
planetsConjMoon = chart.objects.getObjectsAspecting(moon, [0])
planetsConjMercury = chart.objects.getObjectsAspecting(mercury, [0])
_set = _merge(planetsConjMoon, planetsConjMercury)
factors.append(['Planets Conj Moon or Mercury', _set])
# Asc ruler if aspected by disposer
ascRulerID = essential.ruler(asc.sign)
ascRuler = chart.getObject(ascRulerID)
disposerID = essential.ruler(ascRuler.sign)
disposer = chart.getObject(disposerID)
_set = []
if aspects.isAspecting(disposer, ascRuler, const.MAJOR_ASPECTS):
_set = [ascRuler.id]
factors.append(['Asc Ruler if aspected by its disposer', _set]);
# Planets aspecting Moon or Mercury
aspMoon = chart.objects.getObjectsAspecting(moon, [60,90,120,180])
aspMercury = chart.objects.getObjectsAspecting(mercury, [60,90,120,180])
_set = _merge(aspMoon, aspMercury)
factors.append(['Planets Asp Moon or Mercury', _set])
return factors | Computes the behavior. | Below is the the instruction that describes the task:
### Input:
Computes the behavior.
### Response:
def compute(chart):
""" Computes the behavior. """
factors = []
# Planets in House1 or Conjunct Asc
house1 = chart.getHouse(const.HOUSE1)
planetsHouse1 = chart.objects.getObjectsInHouse(house1)
asc = chart.getAngle(const.ASC)
planetsConjAsc = chart.objects.getObjectsAspecting(asc, [0])
_set = _merge(planetsHouse1, planetsConjAsc)
factors.append(['Planets in House1 or Conj Asc', _set])
# Planets conjunct Moon or Mercury
moon = chart.get(const.MOON)
mercury = chart.get(const.MERCURY)
planetsConjMoon = chart.objects.getObjectsAspecting(moon, [0])
planetsConjMercury = chart.objects.getObjectsAspecting(mercury, [0])
_set = _merge(planetsConjMoon, planetsConjMercury)
factors.append(['Planets Conj Moon or Mercury', _set])
# Asc ruler if aspected by disposer
ascRulerID = essential.ruler(asc.sign)
ascRuler = chart.getObject(ascRulerID)
disposerID = essential.ruler(ascRuler.sign)
disposer = chart.getObject(disposerID)
_set = []
if aspects.isAspecting(disposer, ascRuler, const.MAJOR_ASPECTS):
_set = [ascRuler.id]
factors.append(['Asc Ruler if aspected by its disposer', _set]);
# Planets aspecting Moon or Mercury
aspMoon = chart.objects.getObjectsAspecting(moon, [60,90,120,180])
aspMercury = chart.objects.getObjectsAspecting(mercury, [60,90,120,180])
_set = _merge(aspMoon, aspMercury)
factors.append(['Planets Asp Moon or Mercury', _set])
return factors |
def get_bitcoin_virtual_transactions(blockchain_opts, first_block_height, last_block_height, tx_filter=None, spv_last_block=None, first_block_hash=None, **hints):
"""
Get the sequence of virtualchain transactions from the blockchain.
Each transaction returned will be a `nulldata` transaction (i.e. the first output script starts with OP_RETURN).
* output values will be in satoshis
* `fee` will be defined, and will be the total amount sent (in satoshis)
* `txindex` will be defined, and will be the offset in the block where the tx occurs
* `senders` will be defined as a list, and will contain the following information
* `script_pubkey`: an output scriptPubKey hex script
* `units`: a value in satoshis
* `addresses`: a list of zero or more addresses
This list corresponds to the list of outputs that funded the given transaction's inputs.
That is, senders[i] corresponds to the output that funded vin[i], found in transaction vin[i]['txid']
* `nulldata` will be define as the hex string that encodes the OP_RETURN payload
@blockchain_opts must be a dict with the following keys:
* `bitcoind_server`: hostname of the bitcoind peer
* `bitcoind_port`: RPC port of the bitcoind peer
* `bitcoind_p2p_port`: p2p port of the bitcoind peer
* `bitcoind_user`: username to authenticate
* `bitcoind_passwd`: password for authentication
* `bitcoind_spv_path`: path on disk to where SPV headers should be stored
Returns a list of [(block number), [txs]] on success
Returns None on error
"""
headers_path = blockchain_opts['bitcoind_spv_path']
bitcoind_server = "%s:%s" % (blockchain_opts['bitcoind_server'], blockchain_opts['bitcoind_p2p_port'])
spv_last_block = spv_last_block if spv_last_block is not None else last_block_height - 1
if headers_path is None:
log.error("FATAL: bitcoind_spv_path not defined in blockchain options")
os.abort()
if not os.path.exists(headers_path):
log.debug("Will download SPV headers to %s" % headers_path)
# synchronize SPV headers
SPVClient.init( headers_path )
rc = None
for i in xrange(0, 65536, 1):
# basically try forever
try:
rc = SPVClient.sync_header_chain( headers_path, bitcoind_server, spv_last_block )
if not rc:
delay = min( 600, 2**i + ((2**i) * random.random()) )
log.error("Failed to synchronize SPV headers (%s) up to %s. Try again in %s seconds" % (headers_path, last_block_height, delay))
time.sleep( delay )
continue
else:
break
except SystemExit, s:
log.error("Aborting on SPV header sync")
os.abort()
except Exception, e:
log.exception(e)
delay = min( 600, 2**i + ((2**i) * random.random()) )
log.debug("Try again in %s seconds" % delay)
time.sleep( delay )
continue
downloader = None
for i in xrange(0, 65536, 1):
# basically try forever
try:
# fetch all blocks
downloader = BlockchainDownloader( blockchain_opts, blockchain_opts['bitcoind_spv_path'], first_block_height, last_block_height - 1, \
p2p_port=blockchain_opts['bitcoind_p2p_port'], tx_filter=tx_filter )
if first_block_height > last_block_height - 1:
downloader.loop_exit()
break
rc = downloader.run()
if not rc:
delay = min( 600, 2**i + ((2**i) * random.random()) )
log.error("Failed to fetch %s-%s; trying again in %s seconds" % (first_block_height, last_block_height, delay))
time.sleep( delay )
continue
else:
break
except SystemExit, s:
log.error("Aborting on blockchain sync")
os.abort()
except Exception, e:
log.exception(e)
delay = min( 600, 2**i + ((2**i) * random.random()) )
log.debug("Try again in %s seconds" % delay)
time.sleep( delay )
continue
if not rc or downloader is None:
log.error("Failed to fetch blocks %s-%s" % (first_block_height, last_block_height))
return None
# extract
block_info = downloader.get_block_info()
return block_info | Get the sequence of virtualchain transactions from the blockchain.
Each transaction returned will be a `nulldata` transaction (i.e. the first output script starts with OP_RETURN).
* output values will be in satoshis
* `fee` will be defined, and will be the total amount sent (in satoshis)
* `txindex` will be defined, and will be the offset in the block where the tx occurs
* `senders` will be defined as a list, and will contain the following information
* `script_pubkey`: an output scriptPubKey hex script
* `units`: a value in satoshis
* `addresses`: a list of zero or more addresses
This list corresponds to the list of outputs that funded the given transaction's inputs.
That is, senders[i] corresponds to the output that funded vin[i], found in transaction vin[i]['txid']
* `nulldata` will be define as the hex string that encodes the OP_RETURN payload
@blockchain_opts must be a dict with the following keys:
* `bitcoind_server`: hostname of the bitcoind peer
* `bitcoind_port`: RPC port of the bitcoind peer
* `bitcoind_p2p_port`: p2p port of the bitcoind peer
* `bitcoind_user`: username to authenticate
* `bitcoind_passwd`: password for authentication
* `bitcoind_spv_path`: path on disk to where SPV headers should be stored
Returns a list of [(block number), [txs]] on success
Returns None on error | Below is the instruction that describes the task:
### Input:
Get the sequence of virtualchain transactions from the blockchain.
Each transaction returned will be a `nulldata` transaction (i.e. the first output script starts with OP_RETURN).
* output values will be in satoshis
* `fee` will be defined, and will be the total amount sent (in satoshis)
* `txindex` will be defined, and will be the offset in the block where the tx occurs
* `senders` will be defined as a list, and will contain the following information
* `script_pubkey`: an output scriptPubKey hex script
* `units`: a value in satoshis
* `addresses`: a list of zero or more addresses
This list corresponds to the list of outputs that funded the given transaction's inputs.
That is, senders[i] corresponds to the output that funded vin[i], found in transaction vin[i]['txid']
* `nulldata` will be define as the hex string that encodes the OP_RETURN payload
@blockchain_opts must be a dict with the following keys:
* `bitcoind_server`: hostname of the bitcoind peer
* `bitcoind_port`: RPC port of the bitcoind peer
* `bitcoind_p2p_port`: p2p port of the bitcoind peer
* `bitcoind_user`: username to authenticate
* `bitcoind_passwd`: password for authentication
* `bitcoind_spv_path`: path on disk to where SPV headers should be stored
Returns a list of [(block number), [txs]] on success
Returns None on error
### Response:
def get_bitcoin_virtual_transactions(blockchain_opts, first_block_height, last_block_height, tx_filter=None, spv_last_block=None, first_block_hash=None, **hints):
"""
Get the sequence of virtualchain transactions from the blockchain.
Each transaction returned will be a `nulldata` transaction (i.e. the first output script starts with OP_RETURN).
* output values will be in satoshis
* `fee` will be defined, and will be the total amount sent (in satoshis)
* `txindex` will be defined, and will be the offset in the block where the tx occurs
* `senders` will be defined as a list, and will contain the following information
* `script_pubkey`: an output scriptPubKey hex script
* `units`: a value in satoshis
* `addresses`: a list of zero or more addresses
This list corresponds to the list of outputs that funded the given transaction's inputs.
That is, senders[i] corresponds to the output that funded vin[i], found in transaction vin[i]['txid']
* `nulldata` will be define as the hex string that encodes the OP_RETURN payload
@blockchain_opts must be a dict with the following keys:
* `bitcoind_server`: hostname of the bitcoind peer
* `bitcoind_port`: RPC port of the bitcoind peer
* `bitcoind_p2p_port`: p2p port of the bitcoind peer
* `bitcoind_user`: username to authenticate
* `bitcoind_passwd`: password for authentication
* `bitcoind_spv_path`: path on disk to where SPV headers should be stored
Returns a list of [(block number), [txs]] on success
Returns None on error
"""
headers_path = blockchain_opts['bitcoind_spv_path']
bitcoind_server = "%s:%s" % (blockchain_opts['bitcoind_server'], blockchain_opts['bitcoind_p2p_port'])
spv_last_block = spv_last_block if spv_last_block is not None else last_block_height - 1
if headers_path is None:
log.error("FATAL: bitcoind_spv_path not defined in blockchain options")
os.abort()
if not os.path.exists(headers_path):
log.debug("Will download SPV headers to %s" % headers_path)
# synchronize SPV headers
SPVClient.init( headers_path )
rc = None
for i in xrange(0, 65536, 1):
# basically try forever
try:
rc = SPVClient.sync_header_chain( headers_path, bitcoind_server, spv_last_block )
if not rc:
delay = min( 600, 2**i + ((2**i) * random.random()) )
log.error("Failed to synchronize SPV headers (%s) up to %s. Try again in %s seconds" % (headers_path, last_block_height, delay))
time.sleep( delay )
continue
else:
break
except SystemExit, s:
log.error("Aborting on SPV header sync")
os.abort()
except Exception, e:
log.exception(e)
delay = min( 600, 2**i + ((2**i) * random.random()) )
log.debug("Try again in %s seconds" % delay)
time.sleep( delay )
continue
downloader = None
for i in xrange(0, 65536, 1):
# basically try forever
try:
# fetch all blocks
downloader = BlockchainDownloader( blockchain_opts, blockchain_opts['bitcoind_spv_path'], first_block_height, last_block_height - 1, \
p2p_port=blockchain_opts['bitcoind_p2p_port'], tx_filter=tx_filter )
if first_block_height > last_block_height - 1:
downloader.loop_exit()
break
rc = downloader.run()
if not rc:
delay = min( 600, 2**i + ((2**i) * random.random()) )
log.error("Failed to fetch %s-%s; trying again in %s seconds" % (first_block_height, last_block_height, delay))
time.sleep( delay )
continue
else:
break
except SystemExit, s:
log.error("Aborting on blockchain sync")
os.abort()
except Exception, e:
log.exception(e)
delay = min( 600, 2**i + ((2**i) * random.random()) )
log.debug("Try again in %s seconds" % delay)
time.sleep( delay )
continue
if not rc or downloader is None:
log.error("Failed to fetch blocks %s-%s" % (first_block_height, last_block_height))
return None
# extract
block_info = downloader.get_block_info()
return block_info |
def load_figure(self, fig, fmt):
"""Set a new figure in the figure canvas."""
self.figcanvas.load_figure(fig, fmt)
self.scale_image()
self.figcanvas.repaint() | Set a new figure in the figure canvas. | Below is the instruction that describes the task:
### Input:
Set a new figure in the figure canvas.
### Response:
def load_figure(self, fig, fmt):
"""Set a new figure in the figure canvas."""
self.figcanvas.load_figure(fig, fmt)
self.scale_image()
self.figcanvas.repaint() |
def vn_info(call=None, kwargs=None):
'''
Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public
'''
if call != 'function':
raise SaltCloudSystemExit(
'The vn_info function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
vn_id = kwargs.get('vn_id', None)
if vn_id:
if name:
log.warning(
'Both the \'vn_id\' and \'name\' arguments were provided. '
'\'vn_id\' will take precedence.'
)
elif name:
vn_id = get_vn_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The vn_info function requires either a \'name\' or a \'vn_id\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.vn.info(auth, int(vn_id))
if response[0] is False:
return response[1]
else:
info = {}
tree = _get_xml(response[1])
info[tree.find('NAME').text] = _xml_to_dict(tree)
return info | Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public | Below is the instruction that describes the task:
### Input:
Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public
### Response:
def vn_info(call=None, kwargs=None):
'''
Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public
'''
if call != 'function':
raise SaltCloudSystemExit(
'The vn_info function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
vn_id = kwargs.get('vn_id', None)
if vn_id:
if name:
log.warning(
'Both the \'vn_id\' and \'name\' arguments were provided. '
'\'vn_id\' will take precedence.'
)
elif name:
vn_id = get_vn_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The vn_info function requires either a \'name\' or a \'vn_id\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.vn.info(auth, int(vn_id))
if response[0] is False:
return response[1]
else:
info = {}
tree = _get_xml(response[1])
info[tree.find('NAME').text] = _xml_to_dict(tree)
return info |
def _init_worker(X, X_shape, X_dtype):
"""Initializer for pool for _mprotate"""
# Using a dictionary is not strictly necessary. You can also
# use global variables.
mprotate_dict["X"] = X
mprotate_dict["X_shape"] = X_shape
mprotate_dict["X_dtype"] = X_dtype | Initializer for pool for _mprotate | Below is the instruction that describes the task:
### Input:
Initializer for pool for _mprotate
### Response:
def _init_worker(X, X_shape, X_dtype):
"""Initializer for pool for _mprotate"""
# Using a dictionary is not strictly necessary. You can also
# use global variables.
mprotate_dict["X"] = X
mprotate_dict["X_shape"] = X_shape
mprotate_dict["X_dtype"] = X_dtype |
def data_merge(a, b):
"""merges b into a and return merged result
based on http://stackoverflow.com/questions/7204805/python-dictionaries-of-dictionaries-merge
and extended to also merge arrays and to replace the content of keys with the same name
NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen"""
key = None
# ## debug output
# sys.stderr.write("DEBUG: %s to %s\n" %(b,a))
try:
if a is None or isinstance(a, (six.string_types, float, six.integer_types)):
# border case for first run or if a is a primitive
a = b
elif isinstance(a, list):
# lists can be only appended
if isinstance(b, list):
# merge lists
a.extend(b)
else:
# append to list
a.append(b)
elif isinstance(a, dict):
# dicts must be merged
if isinstance(b, dict):
for key in b:
if key in a:
a[key] = data_merge(a[key], b[key])
else:
a[key] = b[key]
else:
raise YamlReaderError('Cannot merge non-dict "%s" into dict "%s"' % (b, a))
else:
raise YamlReaderError('NOT IMPLEMENTED "%s" into "%s"' % (b, a))
except TypeError as e:
raise YamlReaderError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a))
return a | merges b into a and return merged result
based on http://stackoverflow.com/questions/7204805/python-dictionaries-of-dictionaries-merge
and extended to also merge arrays and to replace the content of keys with the same name
NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen | Below is the instruction that describes the task:
### Input:
merges b into a and return merged result
based on http://stackoverflow.com/questions/7204805/python-dictionaries-of-dictionaries-merge
and extended to also merge arrays and to replace the content of keys with the same name
NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen
### Response:
def data_merge(a, b):
"""merges b into a and return merged result
based on http://stackoverflow.com/questions/7204805/python-dictionaries-of-dictionaries-merge
and extended to also merge arrays and to replace the content of keys with the same name
NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen"""
key = None
# ## debug output
# sys.stderr.write("DEBUG: %s to %s\n" %(b,a))
try:
if a is None or isinstance(a, (six.string_types, float, six.integer_types)):
# border case for first run or if a is a primitive
a = b
elif isinstance(a, list):
# lists can be only appended
if isinstance(b, list):
# merge lists
a.extend(b)
else:
# append to list
a.append(b)
elif isinstance(a, dict):
# dicts must be merged
if isinstance(b, dict):
for key in b:
if key in a:
a[key] = data_merge(a[key], b[key])
else:
a[key] = b[key]
else:
raise YamlReaderError('Cannot merge non-dict "%s" into dict "%s"' % (b, a))
else:
raise YamlReaderError('NOT IMPLEMENTED "%s" into "%s"' % (b, a))
except TypeError as e:
raise YamlReaderError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a))
return a |
def _get_lines_in_file(self, filename):
"""
:rtype: list
"""
with codecs.open(filename, encoding='utf-8') as file:
return file.read().splitlines() | :rtype: list | Below is the instruction that describes the task:
### Input:
:rtype: list
### Response:
def _get_lines_in_file(self, filename):
"""
:rtype: list
"""
with codecs.open(filename, encoding='utf-8') as file:
return file.read().splitlines() |
def _load_x509(certificate):
"""
Loads an ASN.1 object of an x509 certificate into a Certificate object
:param certificate:
An asn1crypto.x509.Certificate object
:return:
A Certificate object
"""
source = certificate.dump()
cf_source = None
try:
cf_source = CFHelpers.cf_data_from_bytes(source)
sec_key_ref = Security.SecCertificateCreateWithData(CoreFoundation.kCFAllocatorDefault, cf_source)
return Certificate(sec_key_ref, certificate)
finally:
if cf_source:
CoreFoundation.CFRelease(cf_source) | Loads an ASN.1 object of an x509 certificate into a Certificate object
:param certificate:
An asn1crypto.x509.Certificate object
:return:
A Certificate object | Below is the instruction that describes the task:
### Input:
Loads an ASN.1 object of an x509 certificate into a Certificate object
:param certificate:
An asn1crypto.x509.Certificate object
:return:
A Certificate object
### Response:
def _load_x509(certificate):
"""
Loads an ASN.1 object of an x509 certificate into a Certificate object
:param certificate:
An asn1crypto.x509.Certificate object
:return:
A Certificate object
"""
source = certificate.dump()
cf_source = None
try:
cf_source = CFHelpers.cf_data_from_bytes(source)
sec_key_ref = Security.SecCertificateCreateWithData(CoreFoundation.kCFAllocatorDefault, cf_source)
return Certificate(sec_key_ref, certificate)
finally:
if cf_source:
CoreFoundation.CFRelease(cf_source) |
def list_databases(self, instance, limit=None, marker=None):
"""Returns all databases for the specified instance."""
return instance.list_databases(limit=limit, marker=marker) | Returns all databases for the specified instance. | Below is the instruction that describes the task:
### Input:
Returns all databases for the specified instance.
### Response:
def list_databases(self, instance, limit=None, marker=None):
"""Returns all databases for the specified instance."""
return instance.list_databases(limit=limit, marker=marker) |
def loop(_=None, force=False): # pylint: disable=invalid-name
"""Causes a function to loop indefinitely."""
if not force:
raise AttributeError(
'threads.loop() is DEPRECATED. If you really like this and want to '
'keep it, file an issue at https://github.com/google/openhtf/issues '
'and use it as @loop(force=True) for now.')
def real_loop(fn):
@functools.wraps(fn)
def _proc(*args, **kwargs):
"""Wrapper to return."""
while True:
fn(*args, **kwargs)
_proc.once = fn # way for tests to invoke the function once
# you may need to pass in "self" since this may be unbound.
return _proc
return real_loop | Causes a function to loop indefinitely. | Below is the instruction that describes the task:
### Input:
Causes a function to loop indefinitely.
### Response:
def loop(_=None, force=False): # pylint: disable=invalid-name
"""Causes a function to loop indefinitely."""
if not force:
raise AttributeError(
'threads.loop() is DEPRECATED. If you really like this and want to '
'keep it, file an issue at https://github.com/google/openhtf/issues '
'and use it as @loop(force=True) for now.')
def real_loop(fn):
@functools.wraps(fn)
def _proc(*args, **kwargs):
"""Wrapper to return."""
while True:
fn(*args, **kwargs)
_proc.once = fn # way for tests to invoke the function once
# you may need to pass in "self" since this may be unbound.
return _proc
return real_loop |
def _monitor_events(self):
"""Watch for Processing Block events."""
LOG.info("Starting to monitor PB events")
check_counter = 0
while True:
if check_counter == 50:
check_counter = 0
LOG.debug('Checking for PB events...')
published_events = self._pb_events.get_published_events()
for event in published_events:
if event.type == 'status_changed':
LOG.info('PB status changed event: %s',
event.data['status'])
if event.data['status'] == 'created':
LOG.info('Acknowledged PB created event (%s) for %s, '
'[timestamp: %s]', event.id,
event.object_id, event.timestamp)
pb = ProcessingBlock(event.object_id)
self._queue.put(event.object_id, pb.priority, pb.type)
if event.data['status'] == 'completed':
LOG.info('Acknowledged PB completed event (%s) for %s,'
' [timestamp: %s]', event.id,
event.object_id, event.timestamp)
self._num_pbcs -= 1
if self._num_pbcs < 0:
self._num_pbcs = 0
time.sleep(0.1)
check_counter += 1 | Watch for Processing Block events. | Below is the instruction that describes the task:
### Input:
Watch for Processing Block events.
### Response:
def _monitor_events(self):
"""Watch for Processing Block events."""
LOG.info("Starting to monitor PB events")
check_counter = 0
while True:
if check_counter == 50:
check_counter = 0
LOG.debug('Checking for PB events...')
published_events = self._pb_events.get_published_events()
for event in published_events:
if event.type == 'status_changed':
LOG.info('PB status changed event: %s',
event.data['status'])
if event.data['status'] == 'created':
LOG.info('Acknowledged PB created event (%s) for %s, '
'[timestamp: %s]', event.id,
event.object_id, event.timestamp)
pb = ProcessingBlock(event.object_id)
self._queue.put(event.object_id, pb.priority, pb.type)
if event.data['status'] == 'completed':
LOG.info('Acknowledged PB completed event (%s) for %s,'
' [timestamp: %s]', event.id,
event.object_id, event.timestamp)
self._num_pbcs -= 1
if self._num_pbcs < 0:
self._num_pbcs = 0
time.sleep(0.1)
check_counter += 1 |
def getLayerIndex(self, layer):
"""
Given a reference to a layer, returns the index of that layer in
self.layers.
"""
for i in range(len(self.layers)):
if layer == self.layers[i]: # shallow cmp
return i
return -1 # not in list | Given a reference to a layer, returns the index of that layer in
self.layers. | Below is the instruction that describes the task:
### Input:
Given a reference to a layer, returns the index of that layer in
self.layers.
### Response:
def getLayerIndex(self, layer):
"""
Given a reference to a layer, returns the index of that layer in
self.layers.
"""
for i in range(len(self.layers)):
if layer == self.layers[i]: # shallow cmp
return i
return -1 # not in list |
def Wagner_original(T, Tc, Pc, a, b, c, d):
r'''Calculates vapor pressure using the Wagner equation (3, 6 form).
Requires critical temperature and pressure as well as four coefficients
specific to each chemical.
.. math::
\ln P^{sat}= \ln P_c + \frac{a\tau + b \tau^{1.5} + c\tau^3 + d\tau^6}
{T_r}
\tau = 1 - \frac{T}{T_c}
Parameters
----------
T : float
Temperature of fluid, [K]
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
a, b, c, d : floats
Parameters for wagner equation. Specific to each chemical. [-]
Returns
-------
Psat : float
Vapor pressure at T [Pa]
Notes
-----
Warning: Pc is often treated as adjustable constant.
Examples
--------
Methane, coefficients from [2]_, at 100 K.
>>> Wagner_original(100.0, 190.53, 4596420., a=-6.00435, b=1.1885,
... c=-0.834082, d=-1.22833)
34520.44601450496
References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
.. [2] McGarry, Jack. "Correlation and Prediction of the Vapor Pressures of
Pure Liquids over Large Pressure Ranges." Industrial & Engineering
Chemistry Process Design and Development 22, no. 2 (April 1, 1983):
313-22. doi:10.1021/i200021a023.
'''
Tr = T/Tc
tau = 1.0 - Tr
return Pc*exp((a*tau + b*tau**1.5 + c*tau**3 + d*tau**6)/Tr) | r'''Calculates vapor pressure using the Wagner equation (3, 6 form).
Requires critical temperature and pressure as well as four coefficients
specific to each chemical.
.. math::
\ln P^{sat}= \ln P_c + \frac{a\tau + b \tau^{1.5} + c\tau^3 + d\tau^6}
{T_r}
\tau = 1 - \frac{T}{T_c}
Parameters
----------
T : float
Temperature of fluid, [K]
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
a, b, c, d : floats
Parameters for wagner equation. Specific to each chemical. [-]
Returns
-------
Psat : float
Vapor pressure at T [Pa]
Notes
-----
Warning: Pc is often treated as adjustable constant.
Examples
--------
Methane, coefficients from [2]_, at 100 K.
>>> Wagner_original(100.0, 190.53, 4596420., a=-6.00435, b=1.1885,
... c=-0.834082, d=-1.22833)
34520.44601450496
References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
.. [2] McGarry, Jack. "Correlation and Prediction of the Vapor Pressures of
Pure Liquids over Large Pressure Ranges." Industrial & Engineering
Chemistry Process Design and Development 22, no. 2 (April 1, 1983):
313-22. doi:10.1021/i200021a023. | Below is the instruction that describes the task:
### Input:
r'''Calculates vapor pressure using the Wagner equation (3, 6 form).
Requires critical temperature and pressure as well as four coefficients
specific to each chemical.
.. math::
\ln P^{sat}= \ln P_c + \frac{a\tau + b \tau^{1.5} + c\tau^3 + d\tau^6}
{T_r}
\tau = 1 - \frac{T}{T_c}
Parameters
----------
T : float
Temperature of fluid, [K]
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
a, b, c, d : floats
Parameters for wagner equation. Specific to each chemical. [-]
Returns
-------
Psat : float
Vapor pressure at T [Pa]
Notes
-----
Warning: Pc is often treated as adjustable constant.
Examples
--------
Methane, coefficients from [2]_, at 100 K.
>>> Wagner_original(100.0, 190.53, 4596420., a=-6.00435, b=1.1885,
... c=-0.834082, d=-1.22833)
34520.44601450496
References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
.. [2] McGarry, Jack. "Correlation and Prediction of the Vapor Pressures of
Pure Liquids over Large Pressure Ranges." Industrial & Engineering
Chemistry Process Design and Development 22, no. 2 (April 1, 1983):
313-22. doi:10.1021/i200021a023.
### Response:
def Wagner_original(T, Tc, Pc, a, b, c, d):
r'''Calculates vapor pressure using the Wagner equation (3, 6 form).
Requires critical temperature and pressure as well as four coefficients
specific to each chemical.
.. math::
\ln P^{sat}= \ln P_c + \frac{a\tau + b \tau^{1.5} + c\tau^3 + d\tau^6}
{T_r}
\tau = 1 - \frac{T}{T_c}
Parameters
----------
T : float
Temperature of fluid, [K]
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
a, b, c, d : floats
Parameters for wagner equation. Specific to each chemical. [-]
Returns
-------
Psat : float
Vapor pressure at T [Pa]
Notes
-----
Warning: Pc is often treated as adjustable constant.
Examples
--------
Methane, coefficients from [2]_, at 100 K.
>>> Wagner_original(100.0, 190.53, 4596420., a=-6.00435, b=1.1885,
... c=-0.834082, d=-1.22833)
34520.44601450496
References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
.. [2] McGarry, Jack. "Correlation and Prediction of the Vapor Pressures of
Pure Liquids over Large Pressure Ranges." Industrial & Engineering
Chemistry Process Design and Development 22, no. 2 (April 1, 1983):
313-22. doi:10.1021/i200021a023.
'''
Tr = T/Tc
tau = 1.0 - Tr
return Pc*exp((a*tau + b*tau**1.5 + c*tau**3 + d*tau**6)/Tr) |
def ReqOrderInsert(self, pInstrument: str, pDirection: DirectType, pOffset: OffsetType, pPrice: float = 0.0, pVolume: int = 1, pType: OrderType = OrderType.Limit, pCustom: int = 0):
"""委托
:param pInstrument:
:param pDirection:
:param pOffset:
:param pPrice:
:param pVolume:
:param pType:
:param pCustom:
:return:
"""
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_AnyPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_IOC
LimitPrice = 0.0
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_AV
if pType == OrderType.Market: # 市价
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_AnyPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_IOC
LimitPrice = 0.0
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_AV
elif pType == OrderType.Limit: # 限价
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_LimitPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_GFD
LimitPrice = pPrice
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_AV
elif pType == OrderType.FAK: # FAK
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_LimitPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_IOC
LimitPrice = pPrice
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_AV
elif pType == OrderType.FOK: # FOK
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_LimitPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_IOC
LimitPrice = pPrice
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_CV # 全部数量
self._req += 1
self.t.ReqOrderInsert(
BrokerID=self.broker,
InvestorID=self.investor,
InstrumentID=pInstrument,
OrderRef="%06d%06d" % (self._req, pCustom % 1000000),
UserID=self.investor,
# 此处ctp_enum与at_struct名称冲突
Direction=TThostFtdcDirectionType.THOST_FTDC_D_Buy if pDirection == DirectType.Buy else TThostFtdcDirectionType.THOST_FTDC_D_Sell,
CombOffsetFlag=chr(TThostFtdcOffsetFlagType.THOST_FTDC_OF_Open.value if pOffset == OffsetType.Open else TThostFtdcOffsetFlagType.THOST_FTDC_OF_CloseToday.value if pOffset == OffsetType.CloseToday else TThostFtdcOffsetFlagType.THOST_FTDC_OF_Close.value),
CombHedgeFlag=chr(TThostFtdcHedgeFlagType.THOST_FTDC_HF_Speculation.value),
IsAutoSuspend=0,
ForceCloseReason=TThostFtdcForceCloseReasonType.THOST_FTDC_FCC_NotForceClose,
IsSwapOrder=0,
ContingentCondition=TThostFtdcContingentConditionType.THOST_FTDC_CC_Immediately,
VolumeCondition=VolumeCondition,
MinVolume=1,
VolumeTotalOriginal=pVolume,
OrderPriceType=OrderPriceType,
TimeCondition=TimeCondition,
LimitPrice=LimitPrice,
) | 委托
:param pInstrument:
:param pDirection:
:param pOffset:
:param pPrice:
:param pVolume:
:param pType:
:param pCustom:
:return: | Below is the instruction that describes the task:
### Input:
委托
:param pInstrument:
:param pDirection:
:param pOffset:
:param pPrice:
:param pVolume:
:param pType:
:param pCustom:
:return:
### Response:
def ReqOrderInsert(self, pInstrument: str, pDirection: DirectType, pOffset: OffsetType, pPrice: float = 0.0, pVolume: int = 1, pType: OrderType = OrderType.Limit, pCustom: int = 0):
"""委托
:param pInstrument:
:param pDirection:
:param pOffset:
:param pPrice:
:param pVolume:
:param pType:
:param pCustom:
:return:
"""
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_AnyPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_IOC
LimitPrice = 0.0
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_AV
if pType == OrderType.Market: # 市价
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_AnyPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_IOC
LimitPrice = 0.0
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_AV
elif pType == OrderType.Limit: # 限价
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_LimitPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_GFD
LimitPrice = pPrice
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_AV
elif pType == OrderType.FAK: # FAK
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_LimitPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_IOC
LimitPrice = pPrice
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_AV
elif pType == OrderType.FOK: # FOK
OrderPriceType = TThostFtdcOrderPriceTypeType.THOST_FTDC_OPT_LimitPrice
TimeCondition = TThostFtdcTimeConditionType.THOST_FTDC_TC_IOC
LimitPrice = pPrice
VolumeCondition = TThostFtdcVolumeConditionType.THOST_FTDC_VC_CV # 全部数量
self._req += 1
self.t.ReqOrderInsert(
BrokerID=self.broker,
InvestorID=self.investor,
InstrumentID=pInstrument,
OrderRef="%06d%06d" % (self._req, pCustom % 1000000),
UserID=self.investor,
# 此处ctp_enum与at_struct名称冲突
Direction=TThostFtdcDirectionType.THOST_FTDC_D_Buy if pDirection == DirectType.Buy else TThostFtdcDirectionType.THOST_FTDC_D_Sell,
CombOffsetFlag=chr(TThostFtdcOffsetFlagType.THOST_FTDC_OF_Open.value if pOffset == OffsetType.Open else TThostFtdcOffsetFlagType.THOST_FTDC_OF_CloseToday.value if pOffset == OffsetType.CloseToday else TThostFtdcOffsetFlagType.THOST_FTDC_OF_Close.value),
CombHedgeFlag=chr(TThostFtdcHedgeFlagType.THOST_FTDC_HF_Speculation.value),
IsAutoSuspend=0,
ForceCloseReason=TThostFtdcForceCloseReasonType.THOST_FTDC_FCC_NotForceClose,
IsSwapOrder=0,
ContingentCondition=TThostFtdcContingentConditionType.THOST_FTDC_CC_Immediately,
VolumeCondition=VolumeCondition,
MinVolume=1,
VolumeTotalOriginal=pVolume,
OrderPriceType=OrderPriceType,
TimeCondition=TimeCondition,
LimitPrice=LimitPrice,
) |
def _fixStringValue(s, p):
"""Clean up string value including special characters, etc."""
# pylint: disable=too-many-branches
s = s[1:-1]
rv = ''
esc = False
i = -1
while i < len(s) - 1:
i += 1
ch = s[i]
if ch == '\\' and not esc:
esc = True
continue
if not esc:
rv += ch
continue
if ch == '"':
rv += '"'
elif ch == 'n':
rv += '\n'
elif ch == 't':
rv += '\t'
elif ch == 'b':
rv += '\b'
elif ch == 'f':
rv += '\f'
elif ch == 'r':
rv += '\r'
elif ch == '\\':
rv += '\\'
elif ch in ['x', 'X']:
hexc = 0
j = 0
i += 1
while j < 4:
c = s[i + j]
c = c.upper()
if not c.isdigit() and c not in 'ABCDEF':
break
hexc <<= 4
if c.isdigit():
hexc |= ord(c) - ord('0')
else:
hexc |= ord(c) - ord('A') + 0XA
j += 1
if j == 0:
# DSP0004 requires 1..4 hex chars - we have 0
raise MOFParseError(
parser_token=p,
msg="Unicode escape sequence (e.g. '\\x12AB') requires "
"at least one hex character")
rv += six.unichr(hexc)
i += j - 1
esc = False
return rv | Clean up string value including special characters, etc. | Below is the instruction that describes the task:
### Input:
Clean up string value including special characters, etc.
### Response:
def _fixStringValue(s, p):
"""Clean up string value including special characters, etc."""
# pylint: disable=too-many-branches
s = s[1:-1]
rv = ''
esc = False
i = -1
while i < len(s) - 1:
i += 1
ch = s[i]
if ch == '\\' and not esc:
esc = True
continue
if not esc:
rv += ch
continue
if ch == '"':
rv += '"'
elif ch == 'n':
rv += '\n'
elif ch == 't':
rv += '\t'
elif ch == 'b':
rv += '\b'
elif ch == 'f':
rv += '\f'
elif ch == 'r':
rv += '\r'
elif ch == '\\':
rv += '\\'
elif ch in ['x', 'X']:
hexc = 0
j = 0
i += 1
while j < 4:
c = s[i + j]
c = c.upper()
if not c.isdigit() and c not in 'ABCDEF':
break
hexc <<= 4
if c.isdigit():
hexc |= ord(c) - ord('0')
else:
hexc |= ord(c) - ord('A') + 0XA
j += 1
if j == 0:
# DSP0004 requires 1..4 hex chars - we have 0
raise MOFParseError(
parser_token=p,
msg="Unicode escape sequence (e.g. '\\x12AB') requires "
"at least one hex character")
rv += six.unichr(hexc)
i += j - 1
esc = False
return rv |
def mean_vector(lons, lats):
"""
Returns the resultant vector from a series of longitudes and latitudes
Parameters
----------
lons : array-like
A sequence of longitudes (in radians)
lats : array-like
A sequence of latitudes (in radians)
Returns
-------
mean_vec : tuple
(lon, lat) in radians
r_value : number
The magnitude of the resultant vector (between 0 and 1) This represents
the degree of clustering in the data.
"""
xyz = sph2cart(lons, lats)
xyz = np.vstack(xyz).T
mean_vec = xyz.mean(axis=0)
r_value = np.linalg.norm(mean_vec)
mean_vec = cart2sph(*mean_vec)
return mean_vec, r_value | Returns the resultant vector from a series of longitudes and latitudes
Parameters
----------
lons : array-like
A sequence of longitudes (in radians)
lats : array-like
A sequence of latitudes (in radians)
Returns
-------
mean_vec : tuple
(lon, lat) in radians
r_value : number
The magnitude of the resultant vector (between 0 and 1) This represents
the degree of clustering in the data. | Below is the instruction that describes the task:
### Input:
Returns the resultant vector from a series of longitudes and latitudes
Parameters
----------
lons : array-like
A sequence of longitudes (in radians)
lats : array-like
A sequence of latitudes (in radians)
Returns
-------
mean_vec : tuple
(lon, lat) in radians
r_value : number
The magnitude of the resultant vector (between 0 and 1) This represents
the degree of clustering in the data.
### Response:
def mean_vector(lons, lats):
"""
Returns the resultant vector from a series of longitudes and latitudes
Parameters
----------
lons : array-like
A sequence of longitudes (in radians)
lats : array-like
A sequence of latitudes (in radians)
Returns
-------
mean_vec : tuple
(lon, lat) in radians
r_value : number
The magnitude of the resultant vector (between 0 and 1) This represents
the degree of clustering in the data.
"""
xyz = sph2cart(lons, lats)
xyz = np.vstack(xyz).T
mean_vec = xyz.mean(axis=0)
r_value = np.linalg.norm(mean_vec)
mean_vec = cart2sph(*mean_vec)
return mean_vec, r_value |
def updates_selection(update_selection):
""" Decorator indicating that the decorated method could change the selection"""
def handle_update(selection, *args, **kwargs):
"""Check for changes in the selection
If the selection is changed by the decorated method, the internal core element lists are updated and a signal is
emitted with the old and new selection as well as the name of the method that caused the change..
"""
old_selection = selection.get_all()
update_selection(selection, *args, **kwargs)
new_selection = selection.get_all()
affected_models = old_selection ^ new_selection
if len(affected_models) != 0: # The selection was updated
deselected_models = old_selection - new_selection
selected_models = new_selection - old_selection
map(selection.relieve_model, deselected_models)
map(selection.observe_model, selected_models)
# Maintain internal lists for fast access
selection.update_core_element_lists()
# Clear focus if no longer in selection
if selection.focus and selection.focus not in new_selection:
del selection.focus
# Send notifications about changes
affected_classes = set(model.core_element.__class__ for model in affected_models)
msg_namedtuple = SelectionChangedSignalMsg(update_selection.__name__, new_selection, old_selection,
affected_classes)
selection.selection_changed_signal.emit(msg_namedtuple)
if selection.parent_signal is not None:
selection.parent_signal.emit(msg_namedtuple)
return handle_update | Decorator indicating that the decorated method could change the selection | Below is the instruction that describes the task:
### Input:
Decorator indicating that the decorated method could change the selection
### Response:
def updates_selection(update_selection):
""" Decorator indicating that the decorated method could change the selection"""
def handle_update(selection, *args, **kwargs):
"""Check for changes in the selection
If the selection is changed by the decorated method, the internal core element lists are updated and a signal is
emitted with the old and new selection as well as the name of the method that caused the change..
"""
old_selection = selection.get_all()
update_selection(selection, *args, **kwargs)
new_selection = selection.get_all()
affected_models = old_selection ^ new_selection
if len(affected_models) != 0: # The selection was updated
deselected_models = old_selection - new_selection
selected_models = new_selection - old_selection
map(selection.relieve_model, deselected_models)
map(selection.observe_model, selected_models)
# Maintain internal lists for fast access
selection.update_core_element_lists()
# Clear focus if no longer in selection
if selection.focus and selection.focus not in new_selection:
del selection.focus
# Send notifications about changes
affected_classes = set(model.core_element.__class__ for model in affected_models)
msg_namedtuple = SelectionChangedSignalMsg(update_selection.__name__, new_selection, old_selection,
affected_classes)
selection.selection_changed_signal.emit(msg_namedtuple)
if selection.parent_signal is not None:
selection.parent_signal.emit(msg_namedtuple)
return handle_update |
def evaluate(self, instance, step, extra):
    """Evaluate the current ContainerAttribute.
    Args:
        obj (LazyStub): a lazy stub of the object being constructed, if
            needed.
        containers (list of LazyStub): a list of lazy stubs of factories
            being evaluated in a chain, each item being a future field of
            next one.
    """
    # Strip the current instance from the chain
    # (step.chain[0] is the current instance; only its ancestors are passed on)
    chain = step.chain[1:]
    if self.strict and not chain:
        # No enclosing factory: a strict ContainerAttribute has no container.
        raise TypeError(
            "A ContainerAttribute in 'strict' mode can only be used "
            "within a SubFactory.")
    return self.function(instance, chain) | Evaluate the current ContainerAttribute.
Args:
obj (LazyStub): a lazy stub of the object being constructed, if
needed.
containers (list of LazyStub): a list of lazy stubs of factories
being evaluated in a chain, each item being a future field of
next one. | Below is the instruction that describes the task:
### Input:
Evaluate the current ContainerAttribute.
Args:
obj (LazyStub): a lazy stub of the object being constructed, if
needed.
containers (list of LazyStub): a list of lazy stubs of factories
being evaluated in a chain, each item being a future field of
next one.
### Response:
def evaluate(self, instance, step, extra):
    """Evaluate the current ContainerAttribute.

    ``step.chain`` lists the lazy stubs of the factories being evaluated,
    the first entry being the current instance; everything after it is
    handed to ``self.function`` as the container chain.
    """
    containers = step.chain[1:]  # drop the current instance
    if self.strict and not containers:
        raise TypeError(
            "A ContainerAttribute in 'strict' mode can only be used "
            "within a SubFactory.")
    return self.function(instance, containers)
def valid_dict(d, keys=None):
    """
    Check whether the dict contains keys mapped to None (if *keys* is given,
    only those keys are checked; otherwise all keys are checked).
    - Returns True if no checked key maps to None, False otherwise.
    - Also returns False if any key in *keys* is missing from d.
    """
    if keys is None:
        d_ = d
    else:
        # Keep only the items whose key is listed in *keys*.
        d_ = itemfilter(lambda item: item[0] in keys, d)
        if len(d_) != len(keys):
            # At least one requested key is absent from d.
            return False
    values = list(itervalues(d_))
    return False if None in values else True | 检查是否字典中含有值为None的键(给定键的名称则检查给定的键,如果没有,则检查全部键)
- 如果没有值为None的键,则返回True,反之False
- 如果keys中的键不存在于d中,也返回False | Below is the instruction that describes the task:
### Input:
检查是否字典中含有值为None的键(给定键的名称则检查给定的键,如果没有,则检查全部键)
- 如果没有值为None的键,则返回True,反之False
- 如果keys中的键不存在于d中,也返回False
### Response:
def valid_dict(d, keys=None):
    """Check whether a dict contains keys mapped to ``None``.

    If *keys* is given, only those keys are checked; otherwise every key of
    *d* is checked.

    Returns:
        bool: ``False`` if any checked key maps to ``None`` or if any key in
        *keys* is missing from *d*; ``True`` otherwise.
    """
    if keys is None:
        values = d.values()
    else:
        # Plain dict comprehension replaces the third-party itemfilter helper
        # with identical semantics.
        filtered = {k: v for k, v in d.items() if k in keys}
        if len(filtered) != len(keys):
            # At least one requested key is absent from d.
            return False
        values = filtered.values()
    # Single membership test replaces the original
    # ``False if None in values else True`` double negative.
    return None not in values
def destroy():
    """Destroy a database."""
    if not os.path.exists(ARGS.database):
        exit('Error: The database does not exist; you must create it first.')
    if ARGS.force:
        # --force deletes without the interactive confirmation below.
        os.remove(ARGS.database)
    elif raw_input('Destroy {0} [y/n]? '.format(ARGS.database)) in ('y', 'Y'):
        os.remove(ARGS.database) | Destroy a database. | Below is the the instruction that describes the task:
### Input:
Destroy a database.
### Response:
def destroy():
    """Destroy a database."""
    if not os.path.exists(ARGS.database):
        exit('Error: The database does not exist; you must create it first.')
    # Delete outright with --force, otherwise only after the user confirms.
    confirmed = ARGS.force or raw_input('Destroy {0} [y/n]? '.format(ARGS.database)) in ('y', 'Y')
    if confirmed:
        os.remove(ARGS.database)
def mad(arr, relative=True):
    """ Median Absolute Deviation: a "Robust" version of standard deviation.
    Indices variabililty of the sample.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    with warnings.catch_warnings():
        # Silence numpy warnings raised by nanmedian here
        # (e.g. for all-NaN slices).
        warnings.simplefilter("ignore")
        med = np.nanmedian(arr, axis=1)
        # Median of absolute deviations from each row's median.
        mad = np.nanmedian(np.abs(arr - med[:, np.newaxis]), axis=1)
    if relative:
        return mad / med
    else:
        return mad | Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation | Below is the instruction that describes the task:
### Input:
Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
### Response:
def mad(arr, relative=True):
    """Median Absolute Deviation, a robust spread estimate for each row.

    https://en.wikipedia.org/wiki/Median_absolute_deviation

    :param arr: 2-D array; the statistic is computed along axis 1.
    :param relative: if True, return the MAD divided by the row median.
    """
    with warnings.catch_warnings():
        # Silence numpy warnings raised by nanmedian (e.g. all-NaN slices).
        warnings.simplefilter("ignore")
        row_medians = np.nanmedian(arr, axis=1)
        abs_deviation = np.abs(arr - row_medians[:, np.newaxis])
        spread = np.nanmedian(abs_deviation, axis=1)
    return spread / row_medians if relative else spread
def memory_write16(self, addr, data, zone=None):
    """Writes half-words to memory of a target system.
    Args:
        self (JLink): the ``JLink`` instance
        addr (int): start address to write to
        data (list): list of half-words to write
        zone (str): optional memory zone to access
    Returns:
        Number of half-words written to target.
    Raises:
        JLinkException: on memory access error.
    """
    # Thin wrapper: delegate to the generic writer with a 16-bit unit size.
    return self.memory_write(addr, data, zone, 16) | Writes half-words to memory of a target system.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to write to
data (list): list of half-words to write
zone (str): optional memory zone to access
Returns:
Number of half-words written to target.
Raises:
JLinkException: on memory access error. | Below is the the instruction that describes the task:
### Input:
Writes half-words to memory of a target system.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to write to
data (list): list of half-words to write
zone (str): optional memory zone to access
Returns:
Number of half-words written to target.
Raises:
JLinkException: on memory access error.
### Response:
def memory_write16(self, addr, data, zone=None):
    """Writes half-words to memory of a target system.
    Args:
        self (JLink): the ``JLink`` instance
        addr (int): start address to write to
        data (list): list of half-words to write
        zone (str): optional memory zone to access
    Returns:
        Number of half-words written to target.
    Raises:
        JLinkException: on memory access error.
    """
    # Thin wrapper: delegate to the generic writer with a 16-bit unit size.
    return self.memory_write(addr, data, zone, 16)
async def answer_media_group(self, media: typing.Union[MediaGroup, typing.List],
                             disable_notification: typing.Union[base.Boolean, None] = None,
                             reply=False) -> typing.List[Message]:
    """
    Use this method to send a group of photos or videos as an album.
    Source: https://core.telegram.org/bots/api#sendmediagroup
    :param media: A JSON-serialized array describing photos and videos to be sent
    :type media: :obj:`typing.Union[types.MediaGroup, typing.List]`
    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
    :param reply: fill 'reply_to_message_id'
    :return: On success, an array of the sent Messages is returned.
    :rtype: typing.List[types.Message]
    """
    # Send into this message's chat; thread the album as a reply when asked.
    return await self.bot.send_media_group(self.chat.id,
                                           media=media,
                                           disable_notification=disable_notification,
                                           reply_to_message_id=self.message_id if reply else None) | Use this method to send a group of photos or videos as an album.
Source: https://core.telegram.org/bots/api#sendmediagroup
:param media: A JSON-serialized array describing photos and videos to be sent
:type media: :obj:`typing.Union[types.MediaGroup, typing.List]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, an array of the sent Messages is returned.
:rtype: typing.List[types.Message] | Below is the the instruction that describes the task:
### Input:
Use this method to send a group of photos or videos as an album.
Source: https://core.telegram.org/bots/api#sendmediagroup
:param media: A JSON-serialized array describing photos and videos to be sent
:type media: :obj:`typing.Union[types.MediaGroup, typing.List]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, an array of the sent Messages is returned.
:rtype: typing.List[types.Message]
### Response:
async def answer_media_group(self, media: typing.Union[MediaGroup, typing.List],
                             disable_notification: typing.Union[base.Boolean, None] = None,
                             reply=False) -> typing.List[Message]:
    """
    Reply to this message's chat with an album (a group of photos or videos).

    Source: https://core.telegram.org/bots/api#sendmediagroup
    :param media: A JSON-serialized array describing photos and videos to be sent
    :type media: :obj:`typing.Union[types.MediaGroup, typing.List]`
    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
    :param reply: fill 'reply_to_message_id'
    :return: On success, an array of the sent Messages is returned.
    :rtype: typing.List[types.Message]
    """
    reply_to = self.message_id if reply else None
    return await self.bot.send_media_group(self.chat.id,
                                           media=media,
                                           disable_notification=disable_notification,
                                           reply_to_message_id=reply_to)
def tojson(val, indent=None):
    '''
    Implementation of tojson filter (only present in Jinja 2.9 and later). If
    Jinja 2.9 or later is installed, then the upstream version of this filter
    will be used.
    '''
    options = {'ensure_ascii': True}
    if indent is not None:
        options['indent'] = indent
    # Escape HTML/JS-sensitive characters so the JSON can be embedded safely.
    return (
        salt.utils.json.dumps(
            val, **options
        ).replace('<', '\\u003c')
        .replace('>', '\\u003e')
        .replace('&', '\\u0026')
        .replace("'", '\\u0027')
    ) | Implementation of tojson filter (only present in Jinja 2.9 and later). If
Jinja 2.9 or later is installed, then the upstream version of this filter
will be used. | Below is the the instruction that describes the task:
### Input:
Implementation of tojson filter (only present in Jinja 2.9 and later). If
Jinja 2.9 or later is installed, then the upstream version of this filter
will be used.
### Response:
def tojson(val, indent=None):
    '''
    Implementation of tojson filter (only present in Jinja 2.9 and later). If
    Jinja 2.9 or later is installed, then the upstream version of this filter
    will be used.
    '''
    options = {'ensure_ascii': True}
    if indent is not None:
        options['indent'] = indent
    result = salt.utils.json.dumps(val, **options)
    # Escape HTML/JS-sensitive characters so the JSON can be embedded safely.
    for char, escape in (('<', '\\u003c'),
                         ('>', '\\u003e'),
                         ('&', '\\u0026'),
                         ("'", '\\u0027')):
        result = result.replace(char, escape)
    return result
def balance(ctx, accounts):
    """ Show Account balances
    """
    # One table row per (account, balance) pair, amounts right-aligned.
    t = PrettyTable(["Account", "Amount"])
    t.align = "r"
    for a in accounts:
        account = Account(a, peerplays_instance=ctx.peerplays)
        for b in account.balances:
            t.add_row([str(a), str(b)])
    click.echo(str(t)) | Show Account balances
### Input:
Show Account balances
### Response:
def balance(ctx, accounts):
    """ Show Account balances
    """
    # One table row per (account, balance) pair, amounts right-aligned.
    t = PrettyTable(["Account", "Amount"])
    t.align = "r"
    for a in accounts:
        account = Account(a, peerplays_instance=ctx.peerplays)
        for b in account.balances:
            t.add_row([str(a), str(b)])
    click.echo(str(t))
def network_interfaces_list(resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    List all network interfaces within a resource group.
    :param resource_group: The resource group name to list network
    interfaces within.
    CLI Example:
    .. code-block:: bash
        salt-call azurearm_network.network_interfaces_list testgroup
    '''
    result = {}
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        nics = __utils__['azurearm.paged_object_to_list'](
            netconn.network_interfaces.list(
                resource_group_name=resource_group
            )
        )
        # Key the result mapping by interface name for direct lookup.
        for nic in nics:
            result[nic['name']] = nic
    except CloudError as exc:
        # Log the Azure error and surface it to the caller in the result.
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result | .. versionadded:: 2019.2.0
List all network interfaces within a resource group.
:param resource_group: The resource group name to list network
interfaces within.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_interfaces_list testgroup | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
List all network interfaces within a resource group.
:param resource_group: The resource group name to list network
interfaces within.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_interfaces_list testgroup
### Response:
def network_interfaces_list(resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    List all network interfaces within a resource group.
    :param resource_group: The resource group name to list network
    interfaces within.
    CLI Example:
    .. code-block:: bash
        salt-call azurearm_network.network_interfaces_list testgroup
    '''
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        interfaces = __utils__['azurearm.paged_object_to_list'](
            netconn.network_interfaces.list(resource_group_name=resource_group)
        )
        # Key each interface dict by its name for convenient lookup.
        result = {nic['name']: nic for nic in interfaces}
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result
def parse_response(gdb_mi_text):
    """Parse gdb mi text and turn it into a dictionary.
    See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records
    for details on types of gdb mi output.
    Args:
        gdb_mi_text (str): String output from gdb
    Returns:
        dict with the following keys:
        type (either 'notify', 'result', 'console', 'log', 'target', 'done'),
        message (str or None),
        payload (str, list, dict, or None)
    """
    stream = StringStream(gdb_mi_text, debug=_DEBUG)
    # NOTE(review): the console/log/target branches below match their regex
    # twice (once to test, once to extract groups); the match objects could
    # be hoisted into variables.
    if _GDB_MI_NOTIFY_RE.match(gdb_mi_text):
        token, message, payload = _get_notify_msg_and_payload(gdb_mi_text, stream)
        return {
            "type": "notify",
            "message": message,
            "payload": payload,
            "token": token,
        }
    elif _GDB_MI_RESULT_RE.match(gdb_mi_text):
        token, message, payload = _get_result_msg_and_payload(gdb_mi_text, stream)
        return {
            "type": "result",
            "message": message,
            "payload": payload,
            "token": token,
        }
    elif _GDB_MI_CONSOLE_RE.match(gdb_mi_text):
        return {
            "type": "console",
            "message": None,
            "payload": _GDB_MI_CONSOLE_RE.match(gdb_mi_text).groups()[0],
        }
    elif _GDB_MI_LOG_RE.match(gdb_mi_text):
        return {
            "type": "log",
            "message": None,
            "payload": _GDB_MI_LOG_RE.match(gdb_mi_text).groups()[0],
        }
    elif _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text):
        return {
            "type": "target",
            "message": None,
            "payload": _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text).groups()[0],
        }
    elif response_is_finished(gdb_mi_text):
        return {"type": "done", "message": None, "payload": None}
    else:
        # This was not gdb mi output, so it must have just been printed by
        # the inferior program that's being debugged
        return {"type": "output", "message": None, "payload": gdb_mi_text} | Parse gdb mi text and turn it into a dictionary.
See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records
for details on types of gdb mi output.
Args:
gdb_mi_text (str): String output from gdb
Returns:
dict with the following keys:
type (either 'notify', 'result', 'console', 'log', 'target', 'done'),
message (str or None),
payload (str, list, dict, or None) | Below is the the instruction that describes the task:
### Input:
Parse gdb mi text and turn it into a dictionary.
See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records
for details on types of gdb mi output.
Args:
gdb_mi_text (str): String output from gdb
Returns:
dict with the following keys:
type (either 'notify', 'result', 'console', 'log', 'target', 'done'),
message (str or None),
payload (str, list, dict, or None)
### Response:
def parse_response(gdb_mi_text):
    """Parse gdb mi text and turn it into a dictionary.
    See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records
    for details on types of gdb mi output.
    Args:
        gdb_mi_text (str): String output from gdb
    Returns:
        dict with the following keys:
        type (either 'notify', 'result', 'console', 'log', 'target', 'done'),
        message (str or None),
        payload (str, list, dict, or None)
    """
    stream = StringStream(gdb_mi_text, debug=_DEBUG)
    if _GDB_MI_NOTIFY_RE.match(gdb_mi_text):
        token, message, payload = _get_notify_msg_and_payload(gdb_mi_text, stream)
        return {
            "type": "notify",
            "message": message,
            "payload": payload,
            "token": token,
        }
    if _GDB_MI_RESULT_RE.match(gdb_mi_text):
        token, message, payload = _get_result_msg_and_payload(gdb_mi_text, stream)
        return {
            "type": "result",
            "message": message,
            "payload": payload,
            "token": token,
        }
    # Match each stream-record pattern once and reuse the match object,
    # instead of matching twice (once to test, once to extract groups).
    console_match = _GDB_MI_CONSOLE_RE.match(gdb_mi_text)
    if console_match:
        return {
            "type": "console",
            "message": None,
            "payload": console_match.groups()[0],
        }
    log_match = _GDB_MI_LOG_RE.match(gdb_mi_text)
    if log_match:
        return {
            "type": "log",
            "message": None,
            "payload": log_match.groups()[0],
        }
    target_match = _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text)
    if target_match:
        return {
            "type": "target",
            "message": None,
            "payload": target_match.groups()[0],
        }
    if response_is_finished(gdb_mi_text):
        return {"type": "done", "message": None, "payload": None}
    # This was not gdb mi output, so it must have just been printed by
    # the inferior program that's being debugged
    return {"type": "output", "message": None, "payload": gdb_mi_text}
def createStyle(self, body, verbose=None):
    """
    Creates a new Visual Style using the message body.
    Returns the title of the new Visual Style. If the title of the Visual Style already existed in the session, a new one will be automatically generated and returned.
    :param body: The details of the new Visual Style to be created.
    :param verbose: print more
    :returns: 200: successful operation
    """
    # POST the serialized style definition to the /styles endpoint.
    PARAMS=set_param(['body'],[body])
    response=api(url=self.___url+'styles', PARAMS=PARAMS, method="POST", verbose=verbose)
    return response | Creates a new Visual Style using the message body.
Returns the title of the new Visual Style. If the title of the Visual Style already existed in the session, a new one will be automatically generated and returned.
:param body: The details of the new Visual Style to be created.
:param verbose: print more
:returns: 200: successful operation | Below is the the instruction that describes the task:
### Input:
Creates a new Visual Style using the message body.
Returns the title of the new Visual Style. If the title of the Visual Style already existed in the session, a new one will be automatically generated and returned.
:param body: The details of the new Visual Style to be created.
:param verbose: print more
:returns: 200: successful operation
### Response:
def createStyle(self, body, verbose=None):
    """
    Creates a new Visual Style using the message body.
    Returns the title of the new Visual Style. If the title of the Visual Style already existed in the session, a new one will be automatically generated and returned.
    :param body: The details of the new Visual Style to be created.
    :param verbose: print more
    :returns: 200: successful operation
    """
    # POST the serialized style definition to the /styles endpoint.
    params = set_param(['body'], [body])
    return api(url=self.___url + 'styles', PARAMS=params, method="POST", verbose=verbose)
def _process_task(self, task, function_execution_info):
    """Execute a task assigned to this worker.
    This method deserializes a task from the scheduler, and attempts to
    execute the task. If the task succeeds, the outputs are stored in the
    local object store. If the task throws an exception, RayTaskError
    objects are stored in the object store to represent the failed task
    (these will be retrieved by calls to get or by subsequent tasks that
    use the outputs of this task).
    """
    # Sanity checks: no other task may be in flight on this worker.
    assert self.current_task_id.is_nil()
    assert self.task_context.task_index == 0
    assert self.task_context.put_index == 1
    if task.actor_id().is_nil():
        # If this worker is not an actor, check that `task_driver_id`
        # was reset when the worker finished the previous task.
        assert self.task_driver_id.is_nil()
        # Set the driver ID of the current running task. This is
        # needed so that if the task throws an exception, we propagate
        # the error message to the correct driver.
        self.task_driver_id = task.driver_id()
    else:
        # If this worker is an actor, task_driver_id wasn't reset.
        # Check that current task's driver ID equals the previous one.
        assert self.task_driver_id == task.driver_id()
    self.task_context.current_task_id = task.task_id()
    function_descriptor = FunctionDescriptor.from_bytes_list(
        task.function_descriptor_list())
    args = task.arguments()
    return_object_ids = task.returns()
    if (not task.actor_id().is_nil()
            or not task.actor_creation_id().is_nil()):
        # Actor tasks carry an extra trailing return ID (the "dummy" object,
        # passed to the executor below); remove it from the regular outputs.
        dummy_return_id = return_object_ids.pop()
    function_executor = function_execution_info.function
    function_name = function_execution_info.function_name
    # Get task arguments from the object store.
    try:
        if function_name != "__ray_terminate__":
            self.reraise_actor_init_error()
        self.memory_monitor.raise_if_low_memory()
        with profiling.profile("task:deserialize_arguments"):
            arguments = self._get_arguments_for_execution(
                function_name, args)
    except Exception as e:
        self._handle_process_task_failure(
            function_descriptor, return_object_ids, e,
            ray.utils.format_error_message(traceback.format_exc()))
        return
    # Execute the task.
    try:
        self._current_task = task
        with profiling.profile("task:execute"):
            if (task.actor_id().is_nil()
                    and task.actor_creation_id().is_nil()):
                outputs = function_executor(*arguments)
            else:
                # Actor method or actor creation: pass the dummy return ID
                # and the actor instance as leading arguments.
                if not task.actor_id().is_nil():
                    key = task.actor_id()
                else:
                    key = task.actor_creation_id()
                outputs = function_executor(dummy_return_id,
                                            self.actors[key], *arguments)
    except Exception as e:
        # Determine whether the exception occured during a task, not an
        # actor method.
        task_exception = task.actor_id().is_nil()
        traceback_str = ray.utils.format_error_message(
            traceback.format_exc(), task_exception=task_exception)
        self._handle_process_task_failure(
            function_descriptor, return_object_ids, e, traceback_str)
        return
    finally:
        self._current_task = None
    # Store the outputs in the local object store.
    try:
        with profiling.profile("task:store_outputs"):
            # If this is an actor task, then the last object ID returned by
            # the task is a dummy output, not returned by the function
            # itself. Decrement to get the correct number of return values.
            # (That "decrement" already happened above via
            # return_object_ids.pop(), so len() here is the real count.)
            num_returns = len(return_object_ids)
            if num_returns == 1:
                outputs = (outputs, )
            self._store_outputs_in_object_store(return_object_ids, outputs)
    except Exception as e:
        self._handle_process_task_failure(
            function_descriptor, return_object_ids, e,
            ray.utils.format_error_message(traceback.format_exc())) | Execute a task assigned to this worker.
This method deserializes a task from the scheduler, and attempts to
execute the task. If the task succeeds, the outputs are stored in the
local object store. If the task throws an exception, RayTaskError
objects are stored in the object store to represent the failed task
(these will be retrieved by calls to get or by subsequent tasks that
use the outputs of this task). | Below is the the instruction that describes the task:
### Input:
Execute a task assigned to this worker.
This method deserializes a task from the scheduler, and attempts to
execute the task. If the task succeeds, the outputs are stored in the
local object store. If the task throws an exception, RayTaskError
objects are stored in the object store to represent the failed task
(these will be retrieved by calls to get or by subsequent tasks that
use the outputs of this task).
### Response:
def _process_task(self, task, function_execution_info):
    """Execute a task assigned to this worker.
    This method deserializes a task from the scheduler, and attempts to
    execute the task. If the task succeeds, the outputs are stored in the
    local object store. If the task throws an exception, RayTaskError
    objects are stored in the object store to represent the failed task
    (these will be retrieved by calls to get or by subsequent tasks that
    use the outputs of this task).
    """
    # Sanity checks: no other task may be in flight on this worker.
    assert self.current_task_id.is_nil()
    assert self.task_context.task_index == 0
    assert self.task_context.put_index == 1
    if task.actor_id().is_nil():
        # If this worker is not an actor, check that `task_driver_id`
        # was reset when the worker finished the previous task.
        assert self.task_driver_id.is_nil()
        # Set the driver ID of the current running task. This is
        # needed so that if the task throws an exception, we propagate
        # the error message to the correct driver.
        self.task_driver_id = task.driver_id()
    else:
        # If this worker is an actor, task_driver_id wasn't reset.
        # Check that current task's driver ID equals the previous one.
        assert self.task_driver_id == task.driver_id()
    self.task_context.current_task_id = task.task_id()
    function_descriptor = FunctionDescriptor.from_bytes_list(
        task.function_descriptor_list())
    args = task.arguments()
    return_object_ids = task.returns()
    if (not task.actor_id().is_nil()
            or not task.actor_creation_id().is_nil()):
        # Actor tasks carry an extra trailing return ID (the "dummy" object,
        # passed to the executor below); remove it from the regular outputs.
        dummy_return_id = return_object_ids.pop()
    function_executor = function_execution_info.function
    function_name = function_execution_info.function_name
    # Get task arguments from the object store.
    try:
        if function_name != "__ray_terminate__":
            self.reraise_actor_init_error()
        self.memory_monitor.raise_if_low_memory()
        with profiling.profile("task:deserialize_arguments"):
            arguments = self._get_arguments_for_execution(
                function_name, args)
    except Exception as e:
        self._handle_process_task_failure(
            function_descriptor, return_object_ids, e,
            ray.utils.format_error_message(traceback.format_exc()))
        return
    # Execute the task.
    try:
        self._current_task = task
        with profiling.profile("task:execute"):
            if (task.actor_id().is_nil()
                    and task.actor_creation_id().is_nil()):
                outputs = function_executor(*arguments)
            else:
                # Actor method or actor creation: pass the dummy return ID
                # and the actor instance as leading arguments.
                if not task.actor_id().is_nil():
                    key = task.actor_id()
                else:
                    key = task.actor_creation_id()
                outputs = function_executor(dummy_return_id,
                                            self.actors[key], *arguments)
    except Exception as e:
        # Determine whether the exception occured during a task, not an
        # actor method.
        task_exception = task.actor_id().is_nil()
        traceback_str = ray.utils.format_error_message(
            traceback.format_exc(), task_exception=task_exception)
        self._handle_process_task_failure(
            function_descriptor, return_object_ids, e, traceback_str)
        return
    finally:
        self._current_task = None
    # Store the outputs in the local object store.
    try:
        with profiling.profile("task:store_outputs"):
            # If this is an actor task, then the last object ID returned by
            # the task is a dummy output, not returned by the function
            # itself. Decrement to get the correct number of return values.
            # (That "decrement" already happened above via
            # return_object_ids.pop(), so len() here is the real count.)
            num_returns = len(return_object_ids)
            if num_returns == 1:
                outputs = (outputs, )
            self._store_outputs_in_object_store(return_object_ids, outputs)
    except Exception as e:
        self._handle_process_task_failure(
            function_descriptor, return_object_ids, e,
            ray.utils.format_error_message(traceback.format_exc()))
def run_migrations_online():
    """Run migrations in 'online' mode.
    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    set_mysql_engine()
    engine = session.create_engine(neutron_config.database.connection)
    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        include_object=include_object,
        version_table=alembic_migrations.VERSION_TABLE
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection and engine, even if migrations fail.
        connection.close()
        engine.dispose() | Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. | Below is the the instruction that describes the task:
### Input:
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
### Response:
def run_migrations_online():
    """Run migrations in 'online' mode.
    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    set_mysql_engine()
    engine = session.create_engine(neutron_config.database.connection)
    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        include_object=include_object,
        version_table=alembic_migrations.VERSION_TABLE
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection and engine, even if migrations fail.
        connection.close()
        engine.dispose()
def generate_data_for_env_problem(problem_name):
    """Generate data for `EnvProblem`s."""
    assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps "
                                                 "should be greater than zero")
    assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be"
                                              " greather than zero")
    problem = registry.env_problem(problem_name)
    # A negative --task_id means "no specific task".
    task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
    data_dir = os.path.expanduser(FLAGS.data_dir)
    tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
    # TODO(msaffar): Handle large values for env_problem_batch_size where we
    # cannot create that many environments within the same process.
    problem.initialize(batch_size=FLAGS.env_problem_batch_size)
    env_problem_utils.play_env_problem_randomly(
        problem, num_steps=FLAGS.env_problem_max_env_steps)
    problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id) | Generate data for `EnvProblem`s.
### Input:
Generate data for `EnvProblem`s.
### Response:
def generate_data_for_env_problem(problem_name):
    """Generate data for `EnvProblem`s.

    :param problem_name: name of a registered `EnvProblem`.
    """
    assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps "
                                                 "should be greater than zero")
    # Fix: assertion message previously read "greather".
    assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be"
                                              " greater than zero")
    problem = registry.env_problem(problem_name)
    # A negative --task_id means "no specific task".
    task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
    data_dir = os.path.expanduser(FLAGS.data_dir)
    tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
    # TODO(msaffar): Handle large values for env_problem_batch_size where we
    # cannot create that many environments within the same process.
    problem.initialize(batch_size=FLAGS.env_problem_batch_size)
    env_problem_utils.play_env_problem_randomly(
        problem, num_steps=FLAGS.env_problem_max_env_steps)
    problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id)
def _eval_firstorder(self, rvecs, data, sigma):
    """The first-order Barnes approximation"""
    if not self.blocksize:
        dist_between_points = self._distance_matrix(rvecs, self.x)
        gaussian_weights = self._weight(dist_between_points, sigma=sigma)
        # Weighted average of data, normalized per evaluation point.
        return gaussian_weights.dot(data) / gaussian_weights.sum(axis=1)
    else:
        # Now rather than calculating the distance matrix all at once,
        # we do it in chunks over rvecs
        ans = np.zeros(rvecs.shape[0], dtype='float')
        bs = self.blocksize
        for a in range(0, rvecs.shape[0], bs):
            dist = self._distance_matrix(rvecs[a:a+bs], self.x)
            weights = self._weight(dist, sigma=sigma)
            ans[a:a+bs] += weights.dot(data) / weights.sum(axis=1)
        return ans | The first-order Barnes approximation
### Input:
The first-order Barnes approximation
### Response:
def _eval_firstorder(self, rvecs, data, sigma):
"""The first-order Barnes approximation"""
if not self.blocksize:
dist_between_points = self._distance_matrix(rvecs, self.x)
gaussian_weights = self._weight(dist_between_points, sigma=sigma)
return gaussian_weights.dot(data) / gaussian_weights.sum(axis=1)
else:
# Now rather than calculating the distance matrix all at once,
# we do it in chunks over rvecs
ans = np.zeros(rvecs.shape[0], dtype='float')
bs = self.blocksize
for a in range(0, rvecs.shape[0], bs):
dist = self._distance_matrix(rvecs[a:a+bs], self.x)
weights = self._weight(dist, sigma=sigma)
ans[a:a+bs] += weights.dot(data) / weights.sum(axis=1)
return ans |
def struct_time_to_jd(st):
    """
    Return a float number representing the Julian Date for the given
    `struct_time`.
    NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.
    """
    year, month, day = st[:3]
    hours, minutes, seconds = st[3:6]
    # Convert time of day to fraction of day
    day += jdutil.hmsm_to_days(hours, minutes, seconds)
    return jdutil.date_to_jd(year, month, day) | Return a float number representing the Julian Date for the given
`struct_time`.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored. | Below is the the instruction that describes the task:
### Input:
Return a float number representing the Julian Date for the given
`struct_time`.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.
### Response:
def struct_time_to_jd(st):
"""
Return a float number representing the Julian Date for the given
`struct_time`.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.
"""
year, month, day = st[:3]
hours, minutes, seconds = st[3:6]
# Convert time of day to fraction of day
day += jdutil.hmsm_to_days(hours, minutes, seconds)
return jdutil.date_to_jd(year, month, day) |
def parse(url_or_path, encoding=None, handler_class=DrillHandler):
"""
:param url_or_path: A file-like object, a filesystem path, a URL, or a string containing XML
:rtype: :class:`XmlElement`
"""
handler = handler_class()
parser = expat.ParserCreate(encoding)
parser.buffer_text = 1
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.characters
if isinstance(url_or_path, basestring):
if '://' in url_or_path[:20]:
with contextlib.closing(url_lib.urlopen(url_or_path)) as f:
parser.ParseFile(f)
elif url_or_path[:100].strip().startswith('<'):
if isinstance(url_or_path, unicode):
if encoding is None:
encoding = 'utf-8'
url_or_path = url_or_path.encode(encoding)
parser.Parse(url_or_path, True)
else:
with open(url_or_path, 'rb') as f:
parser.ParseFile(f)
elif PY3 and isinstance(url_or_path, bytes):
parser.ParseFile(bytes_io(url_or_path))
else:
parser.ParseFile(url_or_path)
return handler.root | :param url_or_path: A file-like object, a filesystem path, a URL, or a string containing XML
:rtype: :class:`XmlElement` | Below is the the instruction that describes the task:
### Input:
:param url_or_path: A file-like object, a filesystem path, a URL, or a string containing XML
:rtype: :class:`XmlElement`
### Response:
def parse(url_or_path, encoding=None, handler_class=DrillHandler):
"""
:param url_or_path: A file-like object, a filesystem path, a URL, or a string containing XML
:rtype: :class:`XmlElement`
"""
handler = handler_class()
parser = expat.ParserCreate(encoding)
parser.buffer_text = 1
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.characters
if isinstance(url_or_path, basestring):
if '://' in url_or_path[:20]:
with contextlib.closing(url_lib.urlopen(url_or_path)) as f:
parser.ParseFile(f)
elif url_or_path[:100].strip().startswith('<'):
if isinstance(url_or_path, unicode):
if encoding is None:
encoding = 'utf-8'
url_or_path = url_or_path.encode(encoding)
parser.Parse(url_or_path, True)
else:
with open(url_or_path, 'rb') as f:
parser.ParseFile(f)
elif PY3 and isinstance(url_or_path, bytes):
parser.ParseFile(bytes_io(url_or_path))
else:
parser.ParseFile(url_or_path)
return handler.root |
def createCitation(self, multiCite = False):
"""Overwriting the general [citation creator](./ExtendedRecord.html#metaknowledge.ExtendedRecord.createCitation) to deal with scopus weirdness.
Creates a citation string, using the same format as other WOS citations, for the [Record](./Record.html#metaknowledge.Record) by reading the relevant special tags (`'year'`, `'J9'`, `'volume'`, `'beginningPage'`, `'DOI'`) and using it to create a [Citation](./Citation.html#metaknowledge.citation.Citation) object.
# Parameters
_multiCite_ : `optional [bool]`
> Default `False`, if `True` a tuple of Citations is returned with each having a different one of the records authors as the author
# Returns
`Citation`
> A [Citation](./Citation.html#metaknowledge.citation.Citation) object containing a citation for the Record.
"""
#Need to put the import here to avoid circular import issues
from ..citation import Citation
valsStr = ''
if multiCite:
auths = []
for auth in self.get("authorsShort", []):
auths.append(auth.replace(',', ''))
else:
if self.get("authorsShort", False):
valsStr += self['authorsShort'][0].replace(',', '') + ', '
if self.get("title", False):
valsStr += self.get('title').replace('(', '').replace(')', '') + ' '
if self.get("year", False):
valsStr += "({}) ".format(self.get('year'))
if self.get("journal", False):
valsStr += self.get('journal') + ', '
if self.get("volume", False):
valsStr += str(self.get('volume')) + ', '
if self.get("beginningPage", False):
valsStr += 'PP. ' + str(self.get('beginningPage'))
if multiCite and len(auths) > 0:
ret = (tuple((Citation(a + valsStr, scopusMode = True) for a in auths)))
elif multiCite:
ret = Citation(valsStr, scopusMode = True),
else:
ret = Citation(valsStr, scopusMode = True)
if multiCite:
rL = []
for c in ret:
if c.bad:
c.year = self.get('year', 0)
c.name = self.get('title', '').upper()
c.journal = self.get("journal", '').upper()
rL.append(c)
return tuple(rL)
else:
if ret.bad:
ret.year = self.get('year', 0)
ret.name = self.get('title', '').upper()
ret.journal = self.get("journal", '').upper()
return ret | Overwriting the general [citation creator](./ExtendedRecord.html#metaknowledge.ExtendedRecord.createCitation) to deal with scopus weirdness.
Creates a citation string, using the same format as other WOS citations, for the [Record](./Record.html#metaknowledge.Record) by reading the relevant special tags (`'year'`, `'J9'`, `'volume'`, `'beginningPage'`, `'DOI'`) and using it to create a [Citation](./Citation.html#metaknowledge.citation.Citation) object.
# Parameters
_multiCite_ : `optional [bool]`
> Default `False`, if `True` a tuple of Citations is returned with each having a different one of the records authors as the author
# Returns
`Citation`
> A [Citation](./Citation.html#metaknowledge.citation.Citation) object containing a citation for the Record. | Below is the the instruction that describes the task:
### Input:
Overwriting the general [citation creator](./ExtendedRecord.html#metaknowledge.ExtendedRecord.createCitation) to deal with scopus weirdness.
Creates a citation string, using the same format as other WOS citations, for the [Record](./Record.html#metaknowledge.Record) by reading the relevant special tags (`'year'`, `'J9'`, `'volume'`, `'beginningPage'`, `'DOI'`) and using it to create a [Citation](./Citation.html#metaknowledge.citation.Citation) object.
# Parameters
_multiCite_ : `optional [bool]`
> Default `False`, if `True` a tuple of Citations is returned with each having a different one of the records authors as the author
# Returns
`Citation`
> A [Citation](./Citation.html#metaknowledge.citation.Citation) object containing a citation for the Record.
### Response:
def createCitation(self, multiCite = False):
"""Overwriting the general [citation creator](./ExtendedRecord.html#metaknowledge.ExtendedRecord.createCitation) to deal with scopus weirdness.
Creates a citation string, using the same format as other WOS citations, for the [Record](./Record.html#metaknowledge.Record) by reading the relevant special tags (`'year'`, `'J9'`, `'volume'`, `'beginningPage'`, `'DOI'`) and using it to create a [Citation](./Citation.html#metaknowledge.citation.Citation) object.
# Parameters
_multiCite_ : `optional [bool]`
> Default `False`, if `True` a tuple of Citations is returned with each having a different one of the records authors as the author
# Returns
`Citation`
> A [Citation](./Citation.html#metaknowledge.citation.Citation) object containing a citation for the Record.
"""
#Need to put the import here to avoid circular import issues
from ..citation import Citation
valsStr = ''
if multiCite:
auths = []
for auth in self.get("authorsShort", []):
auths.append(auth.replace(',', ''))
else:
if self.get("authorsShort", False):
valsStr += self['authorsShort'][0].replace(',', '') + ', '
if self.get("title", False):
valsStr += self.get('title').replace('(', '').replace(')', '') + ' '
if self.get("year", False):
valsStr += "({}) ".format(self.get('year'))
if self.get("journal", False):
valsStr += self.get('journal') + ', '
if self.get("volume", False):
valsStr += str(self.get('volume')) + ', '
if self.get("beginningPage", False):
valsStr += 'PP. ' + str(self.get('beginningPage'))
if multiCite and len(auths) > 0:
ret = (tuple((Citation(a + valsStr, scopusMode = True) for a in auths)))
elif multiCite:
ret = Citation(valsStr, scopusMode = True),
else:
ret = Citation(valsStr, scopusMode = True)
if multiCite:
rL = []
for c in ret:
if c.bad:
c.year = self.get('year', 0)
c.name = self.get('title', '').upper()
c.journal = self.get("journal", '').upper()
rL.append(c)
return tuple(rL)
else:
if ret.bad:
ret.year = self.get('year', 0)
ret.name = self.get('title', '').upper()
ret.journal = self.get("journal", '').upper()
return ret |
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
matches = []
n = len(text)
for word in self.namespace:
if word[:n] == text and word != "__builtins__":
matches.append(word)
return matches | Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match. | Below is the the instruction that describes the task:
### Input:
Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
### Response:
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
matches = []
n = len(text)
for word in self.namespace:
if word[:n] == text and word != "__builtins__":
matches.append(word)
return matches |
def _format_list(result):
"""Format list responses into a table."""
if not result:
return result
if isinstance(result[0], dict):
return _format_list_objects(result)
table = Table(['value'])
for item in result:
table.add_row([iter_to_table(item)])
return table | Format list responses into a table. | Below is the the instruction that describes the task:
### Input:
Format list responses into a table.
### Response:
def _format_list(result):
"""Format list responses into a table."""
if not result:
return result
if isinstance(result[0], dict):
return _format_list_objects(result)
table = Table(['value'])
for item in result:
table.add_row([iter_to_table(item)])
return table |
def Show(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:
"""
Call native `ShowWindow(SW.Show)`.
Return bool, True if succeed otherwise False.
"""
return self.ShowWindow(SW.Show, waitTime) | Call native `ShowWindow(SW.Show)`.
Return bool, True if succeed otherwise False. | Below is the the instruction that describes the task:
### Input:
Call native `ShowWindow(SW.Show)`.
Return bool, True if succeed otherwise False.
### Response:
def Show(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:
"""
Call native `ShowWindow(SW.Show)`.
Return bool, True if succeed otherwise False.
"""
return self.ShowWindow(SW.Show, waitTime) |
def service_reload(service_name, restart_on_failure=False, **kwargs):
"""Reload a system service, optionally falling back to restart if
reload fails.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be reloaded. The follow-
ing example restarts the ceph-osd service for instance id=4:
service_reload('ceph-osd', id=4)
:param service_name: the name of the service to reload
:param restart_on_failure: boolean indicating whether to fallback to a
restart if the reload fails.
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
service_result = service('reload', service_name, **kwargs)
if not service_result and restart_on_failure:
service_result = service('restart', service_name, **kwargs)
return service_result | Reload a system service, optionally falling back to restart if
reload fails.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be reloaded. The follow-
ing example restarts the ceph-osd service for instance id=4:
service_reload('ceph-osd', id=4)
:param service_name: the name of the service to reload
:param restart_on_failure: boolean indicating whether to fallback to a
restart if the reload fails.
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd). | Below is the the instruction that describes the task:
### Input:
Reload a system service, optionally falling back to restart if
reload fails.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be reloaded. The follow-
ing example restarts the ceph-osd service for instance id=4:
service_reload('ceph-osd', id=4)
:param service_name: the name of the service to reload
:param restart_on_failure: boolean indicating whether to fallback to a
restart if the reload fails.
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
### Response:
def service_reload(service_name, restart_on_failure=False, **kwargs):
"""Reload a system service, optionally falling back to restart if
reload fails.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be reloaded. The follow-
ing example restarts the ceph-osd service for instance id=4:
service_reload('ceph-osd', id=4)
:param service_name: the name of the service to reload
:param restart_on_failure: boolean indicating whether to fallback to a
restart if the reload fails.
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
service_result = service('reload', service_name, **kwargs)
if not service_result and restart_on_failure:
service_result = service('restart', service_name, **kwargs)
return service_result |
def accept_EP_PKG(self, inst):
'''
A Package contains packageable elements
'''
for child in many(inst).PE_PE[8000]():
self.accept(child) | A Package contains packageable elements | Below is the the instruction that describes the task:
### Input:
A Package contains packageable elements
### Response:
def accept_EP_PKG(self, inst):
'''
A Package contains packageable elements
'''
for child in many(inst).PE_PE[8000]():
self.accept(child) |
def _parse_value(self): # type: () -> Item
"""
Attempts to parse a value at the current position.
"""
self.mark()
c = self._current
trivia = Trivia()
if c == StringType.SLB.value:
return self._parse_basic_string()
elif c == StringType.SLL.value:
return self._parse_literal_string()
elif c == BoolType.TRUE.value[0]:
return self._parse_true()
elif c == BoolType.FALSE.value[0]:
return self._parse_false()
elif c == "[":
return self._parse_array()
elif c == "{":
return self._parse_inline_table()
elif c in "+-" or self._peek(4) in {
"+inf",
"-inf",
"inf",
"+nan",
"-nan",
"nan",
}:
# Number
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
elif c in string.digits:
# Integer, Float, Date, Time or DateTime
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
m = RFC_3339_LOOSE.match(raw)
if m:
if m.group(1) and m.group(5):
# datetime
try:
return DateTime(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidDateTimeError)
if m.group(1):
try:
return Date(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidDateError)
if m.group(5):
try:
return Time(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidTimeError)
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
else:
raise self.parse_error(UnexpectedCharError, c) | Attempts to parse a value at the current position. | Below is the the instruction that describes the task:
### Input:
Attempts to parse a value at the current position.
### Response:
def _parse_value(self): # type: () -> Item
"""
Attempts to parse a value at the current position.
"""
self.mark()
c = self._current
trivia = Trivia()
if c == StringType.SLB.value:
return self._parse_basic_string()
elif c == StringType.SLL.value:
return self._parse_literal_string()
elif c == BoolType.TRUE.value[0]:
return self._parse_true()
elif c == BoolType.FALSE.value[0]:
return self._parse_false()
elif c == "[":
return self._parse_array()
elif c == "{":
return self._parse_inline_table()
elif c in "+-" or self._peek(4) in {
"+inf",
"-inf",
"inf",
"+nan",
"-nan",
"nan",
}:
# Number
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
elif c in string.digits:
# Integer, Float, Date, Time or DateTime
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
m = RFC_3339_LOOSE.match(raw)
if m:
if m.group(1) and m.group(5):
# datetime
try:
return DateTime(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidDateTimeError)
if m.group(1):
try:
return Date(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidDateError)
if m.group(5):
try:
return Time(parse_rfc3339(raw), trivia, raw)
except ValueError:
raise self.parse_error(InvalidTimeError)
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
else:
raise self.parse_error(UnexpectedCharError, c) |
def drp(points, epsilon):
""" Douglas ramer peucker
Based on https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
Args:
points (:obj:`list` of :obj:`Point`)
epsilon (float): drp threshold
Returns:
:obj:`list` of :obj:`Point`
"""
dmax = 0.0
index = 0
for i in range(1, len(points)-1):
dist = point_line_distance(points[i], points[0], points[-1])
if dist > dmax:
index = i
dmax = dist
if dmax > epsilon:
return drp(points[:index+1], epsilon)[:-1] + drp(points[index:], epsilon)
else:
return [points[0], points[-1]] | Douglas ramer peucker
Based on https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
Args:
points (:obj:`list` of :obj:`Point`)
epsilon (float): drp threshold
Returns:
:obj:`list` of :obj:`Point` | Below is the the instruction that describes the task:
### Input:
Douglas ramer peucker
Based on https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
Args:
points (:obj:`list` of :obj:`Point`)
epsilon (float): drp threshold
Returns:
:obj:`list` of :obj:`Point`
### Response:
def drp(points, epsilon):
""" Douglas ramer peucker
Based on https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
Args:
points (:obj:`list` of :obj:`Point`)
epsilon (float): drp threshold
Returns:
:obj:`list` of :obj:`Point`
"""
dmax = 0.0
index = 0
for i in range(1, len(points)-1):
dist = point_line_distance(points[i], points[0], points[-1])
if dist > dmax:
index = i
dmax = dist
if dmax > epsilon:
return drp(points[:index+1], epsilon)[:-1] + drp(points[index:], epsilon)
else:
return [points[0], points[-1]] |
def list_math_substraction_number(a, b):
"""!
@brief Calculates subtraction between list and number.
@details Each element from list 'a' is subtracted by number 'b'.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): Value that supports mathematical subtraction.
@return (list) Results of subtraction between list and number.
"""
return [a[i] - b for i in range(len(a))]; | !
@brief Calculates subtraction between list and number.
@details Each element from list 'a' is subtracted by number 'b'.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): Value that supports mathematical subtraction.
@return (list) Results of subtraction between list and number. | Below is the the instruction that describes the task:
### Input:
!
@brief Calculates subtraction between list and number.
@details Each element from list 'a' is subtracted by number 'b'.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): Value that supports mathematical subtraction.
@return (list) Results of subtraction between list and number.
### Response:
def list_math_substraction_number(a, b):
"""!
@brief Calculates subtraction between list and number.
@details Each element from list 'a' is subtracted by number 'b'.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): Value that supports mathematical subtraction.
@return (list) Results of subtraction between list and number.
"""
return [a[i] - b for i in range(len(a))]; |
def notChainStr (states, s):
"""XXX I'm not sure this is how it should be done, but I'm going to
try it anyway. Note that for this case, I require only single character
arcs, since I would have to basically invert all accepting states and
non-accepting states of any sub-NFA's.
"""
assert len(s) > 0
arcs = list(map(lambda x : newArcPair(states, x), s))
finish = len(states)
states.append([])
start, lastFinish = arcs[0]
states[start].append((EMPTY, finish))
for crntStart, crntFinish in arcs[1:]:
states[lastFinish].append((EMPTY, crntStart))
states[crntStart].append((EMPTY, finish))
return start, finish | XXX I'm not sure this is how it should be done, but I'm going to
try it anyway. Note that for this case, I require only single character
arcs, since I would have to basically invert all accepting states and
non-accepting states of any sub-NFA's. | Below is the the instruction that describes the task:
### Input:
XXX I'm not sure this is how it should be done, but I'm going to
try it anyway. Note that for this case, I require only single character
arcs, since I would have to basically invert all accepting states and
non-accepting states of any sub-NFA's.
### Response:
def notChainStr (states, s):
"""XXX I'm not sure this is how it should be done, but I'm going to
try it anyway. Note that for this case, I require only single character
arcs, since I would have to basically invert all accepting states and
non-accepting states of any sub-NFA's.
"""
assert len(s) > 0
arcs = list(map(lambda x : newArcPair(states, x), s))
finish = len(states)
states.append([])
start, lastFinish = arcs[0]
states[start].append((EMPTY, finish))
for crntStart, crntFinish in arcs[1:]:
states[lastFinish].append((EMPTY, crntStart))
states[crntStart].append((EMPTY, finish))
return start, finish |
def _set_below(self, v, load=False):
"""
Setter method for below, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert/below (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_below is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_below() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=below.below, is_container='container', presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """below must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=below.below, is_container='container', presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""",
})
self.__below = t
if hasattr(self, '_set'):
self._set() | Setter method for below, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert/below (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_below is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_below() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for below, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert/below (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_below is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_below() directly.
### Response:
def _set_below(self, v, load=False):
"""
Setter method for below, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert/below (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_below is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_below() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=below.below, is_container='container', presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """below must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=below.below, is_container='container', presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""",
})
self.__below = t
if hasattr(self, '_set'):
self._set() |
def value_call(method_name, *args, **kwargs):
"""
Creates an effect that will call value's method with specified name
with the specified arguments and keywords.
@param method_name: the name of method belonging to the value.
@type method_name: str
"""
def value_call(value, context, **_params):
method = getattr(value, method_name)
return _call(method, args, kwargs)
return value_call | Creates an effect that will call value's method with specified name
with the specified arguments and keywords.
@param method_name: the name of method belonging to the value.
@type method_name: str | Below is the the instruction that describes the task:
### Input:
Creates an effect that will call value's method with specified name
with the specified arguments and keywords.
@param method_name: the name of method belonging to the value.
@type method_name: str
### Response:
def value_call(method_name, *args, **kwargs):
"""
Creates an effect that will call value's method with specified name
with the specified arguments and keywords.
@param method_name: the name of method belonging to the value.
@type method_name: str
"""
def value_call(value, context, **_params):
method = getattr(value, method_name)
return _call(method, args, kwargs)
return value_call |
def to_volume(mesh,
file_name=None,
max_element=None,
mesher_id=1):
"""
Convert a surface mesh to a 3D volume mesh generated by gmsh.
An easy way to install the gmsh sdk is through the gmsh-sdk
package on pypi, which downloads and sets up gmsh:
pip install gmsh-sdk
Algorithm details, although check gmsh docs for more information:
The "Delaunay" algorithm is split into three separate steps.
First, an initial mesh of the union of all the volumes in the model is performed,
without inserting points in the volume. The surface mesh is then recovered using H.
Si's boundary recovery algorithm Tetgen/BR. Then a three-dimensional version of the
2D Delaunay algorithm described above is applied to insert points in the volume to
respect the mesh size constraints.
The Frontal" algorithm uses J. Schoeberl's Netgen algorithm.
The "HXT" algorithm is a new efficient and parallel reimplementaton
of the Delaunay algorithm.
The "MMG3D" algorithm (experimental) allows to generate
anisotropic tetrahedralizations
Parameters
--------------
mesh : trimesh.Trimesh
Surface mesh of input geometry
file_name : str or None
Location to save output, in .msh (gmsh) or .bdf (Nastran) format
max_element : float or None
Maximum length of an element in the volume mesh
mesher_id : int
3D unstructured algorithms:
1: Delaunay, 4: Frontal, 7: MMG3D, 10: HXT
Returns
------------
data : None or bytes
MSH data, only returned if file_name is None
"""
# checks mesher selection
if mesher_id not in [1, 4, 7, 10]:
raise ValueError('unavilable mesher selected!')
else:
mesher_id = int(mesher_id)
# set max element length to a best guess if not specified
if max_element is None:
max_element = np.sqrt(np.mean(mesh.area_faces))
if file_name is not None:
# check extensions to make sure it is supported format
if not any(file_name.lower().endswith(e)
for e in ['.bdf', '.msh', '.inp', '.diff', '.mesh']):
raise ValueError(
'Only Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), ' +
'Diffpack (*.diff) and Inria Medit (*.mesh) formats ' +
'are available!')
# exports to disk for gmsh to read using a temp file
mesh_file = tempfile.NamedTemporaryFile(suffix='.stl', delete=False)
mesh_file.close()
mesh.export(mesh_file.name)
# starts Gmsh Python API script
gmsh.initialize()
gmsh.option.setNumber("General.Terminal", 1)
gmsh.model.add('Nastran_stl')
gmsh.merge(mesh_file.name)
dimtag = gmsh.model.getEntities()[0]
dim = dimtag[0]
tag = dimtag[1]
surf_loop = gmsh.model.geo.addSurfaceLoop([tag])
gmsh.model.geo.addVolume([surf_loop])
gmsh.model.geo.synchronize()
# We can then generate a 3D mesh...
gmsh.option.setNumber("Mesh.Algorithm3D", mesher_id)
gmsh.option.setNumber("Mesh.CharacteristicLengthMax", max_element)
gmsh.model.mesh.generate(3)
dimtag2 = gmsh.model.getEntities()[1]
dim2 = dimtag2[0]
tag2 = dimtag2[1]
p2 = gmsh.model.addPhysicalGroup(dim2, [tag2])
gmsh.model.setPhysicalName(dim, p2, 'Nastran_bdf')
data = None
# if file name is None, return msh data using a tempfile
if file_name is None:
out_data = tempfile.NamedTemporaryFile(suffix='.msh', delete=False)
# windows gets mad if two processes try to open the same file
out_data.close()
gmsh.write(out_data.name)
with open(out_data.name, 'rb') as f:
data = f.read()
else:
gmsh.write(file_name)
# close up shop
gmsh.finalize()
return data | Convert a surface mesh to a 3D volume mesh generated by gmsh.
An easy way to install the gmsh sdk is through the gmsh-sdk
package on pypi, which downloads and sets up gmsh:
pip install gmsh-sdk
Algorithm details, although check gmsh docs for more information:
The "Delaunay" algorithm is split into three separate steps.
First, an initial mesh of the union of all the volumes in the model is performed,
without inserting points in the volume. The surface mesh is then recovered using H.
Si's boundary recovery algorithm Tetgen/BR. Then a three-dimensional version of the
2D Delaunay algorithm described above is applied to insert points in the volume to
respect the mesh size constraints.
The Frontal" algorithm uses J. Schoeberl's Netgen algorithm.
The "HXT" algorithm is a new efficient and parallel reimplementaton
of the Delaunay algorithm.
The "MMG3D" algorithm (experimental) allows to generate
anisotropic tetrahedralizations
Parameters
--------------
mesh : trimesh.Trimesh
Surface mesh of input geometry
file_name : str or None
Location to save output, in .msh (gmsh) or .bdf (Nastran) format
max_element : float or None
Maximum length of an element in the volume mesh
mesher_id : int
3D unstructured algorithms:
1: Delaunay, 4: Frontal, 7: MMG3D, 10: HXT
Returns
------------
data : None or bytes
MSH data, only returned if file_name is None | Below is the the instruction that describes the task:
### Input:
Convert a surface mesh to a 3D volume mesh generated by gmsh.
An easy way to install the gmsh sdk is through the gmsh-sdk
package on pypi, which downloads and sets up gmsh:
pip install gmsh-sdk
Algorithm details, although check gmsh docs for more information:
The "Delaunay" algorithm is split into three separate steps.
First, an initial mesh of the union of all the volumes in the model is performed,
without inserting points in the volume. The surface mesh is then recovered using H.
Si's boundary recovery algorithm Tetgen/BR. Then a three-dimensional version of the
2D Delaunay algorithm described above is applied to insert points in the volume to
respect the mesh size constraints.
The Frontal" algorithm uses J. Schoeberl's Netgen algorithm.
The "HXT" algorithm is a new efficient and parallel reimplementaton
of the Delaunay algorithm.
The "MMG3D" algorithm (experimental) allows to generate
anisotropic tetrahedralizations
Parameters
--------------
mesh : trimesh.Trimesh
Surface mesh of input geometry
file_name : str or None
Location to save output, in .msh (gmsh) or .bdf (Nastran) format
max_element : float or None
Maximum length of an element in the volume mesh
mesher_id : int
3D unstructured algorithms:
1: Delaunay, 4: Frontal, 7: MMG3D, 10: HXT
Returns
------------
data : None or bytes
MSH data, only returned if file_name is None
### Response:
def to_volume(mesh,
file_name=None,
max_element=None,
mesher_id=1):
"""
Convert a surface mesh to a 3D volume mesh generated by gmsh.
An easy way to install the gmsh sdk is through the gmsh-sdk
package on pypi, which downloads and sets up gmsh:
pip install gmsh-sdk
Algorithm details, although check gmsh docs for more information:
The "Delaunay" algorithm is split into three separate steps.
First, an initial mesh of the union of all the volumes in the model is performed,
without inserting points in the volume. The surface mesh is then recovered using H.
Si's boundary recovery algorithm Tetgen/BR. Then a three-dimensional version of the
2D Delaunay algorithm described above is applied to insert points in the volume to
respect the mesh size constraints.
The Frontal" algorithm uses J. Schoeberl's Netgen algorithm.
The "HXT" algorithm is a new efficient and parallel reimplementaton
of the Delaunay algorithm.
The "MMG3D" algorithm (experimental) allows to generate
anisotropic tetrahedralizations
Parameters
--------------
mesh : trimesh.Trimesh
Surface mesh of input geometry
file_name : str or None
Location to save output, in .msh (gmsh) or .bdf (Nastran) format
max_element : float or None
Maximum length of an element in the volume mesh
mesher_id : int
3D unstructured algorithms:
1: Delaunay, 4: Frontal, 7: MMG3D, 10: HXT
Returns
------------
data : None or bytes
MSH data, only returned if file_name is None
"""
# checks mesher selection
if mesher_id not in [1, 4, 7, 10]:
raise ValueError('unavilable mesher selected!')
else:
mesher_id = int(mesher_id)
# set max element length to a best guess if not specified
if max_element is None:
max_element = np.sqrt(np.mean(mesh.area_faces))
if file_name is not None:
# check extensions to make sure it is supported format
if not any(file_name.lower().endswith(e)
for e in ['.bdf', '.msh', '.inp', '.diff', '.mesh']):
raise ValueError(
'Only Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), ' +
'Diffpack (*.diff) and Inria Medit (*.mesh) formats ' +
'are available!')
# exports to disk for gmsh to read using a temp file
mesh_file = tempfile.NamedTemporaryFile(suffix='.stl', delete=False)
mesh_file.close()
mesh.export(mesh_file.name)
# starts Gmsh Python API script
gmsh.initialize()
gmsh.option.setNumber("General.Terminal", 1)
gmsh.model.add('Nastran_stl')
gmsh.merge(mesh_file.name)
dimtag = gmsh.model.getEntities()[0]
dim = dimtag[0]
tag = dimtag[1]
surf_loop = gmsh.model.geo.addSurfaceLoop([tag])
gmsh.model.geo.addVolume([surf_loop])
gmsh.model.geo.synchronize()
# We can then generate a 3D mesh...
gmsh.option.setNumber("Mesh.Algorithm3D", mesher_id)
gmsh.option.setNumber("Mesh.CharacteristicLengthMax", max_element)
gmsh.model.mesh.generate(3)
dimtag2 = gmsh.model.getEntities()[1]
dim2 = dimtag2[0]
tag2 = dimtag2[1]
p2 = gmsh.model.addPhysicalGroup(dim2, [tag2])
gmsh.model.setPhysicalName(dim, p2, 'Nastran_bdf')
data = None
# if file name is None, return msh data using a tempfile
if file_name is None:
out_data = tempfile.NamedTemporaryFile(suffix='.msh', delete=False)
# windows gets mad if two processes try to open the same file
out_data.close()
gmsh.write(out_data.name)
with open(out_data.name, 'rb') as f:
data = f.read()
else:
gmsh.write(file_name)
# close up shop
gmsh.finalize()
return data |
def filter_pages(pages, pagenum, pagename):
""" Choices pages by pagenum and pagename """
if pagenum:
try:
pages = [list(pages)[pagenum - 1]]
except IndexError:
raise IndexError('Invalid page number: %d' % pagenum)
if pagename:
pages = [page for page in pages if page.name == pagename]
if pages == []:
raise IndexError('Page not found: pagename=%s' % pagename)
return pages | Choices pages by pagenum and pagename | Below is the the instruction that describes the task:
### Input:
Choices pages by pagenum and pagename
### Response:
def filter_pages(pages, pagenum, pagename):
""" Choices pages by pagenum and pagename """
if pagenum:
try:
pages = [list(pages)[pagenum - 1]]
except IndexError:
raise IndexError('Invalid page number: %d' % pagenum)
if pagename:
pages = [page for page in pages if page.name == pagename]
if pages == []:
raise IndexError('Page not found: pagename=%s' % pagename)
return pages |
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
# FIXME: remove this delay when pyzmq dependency is >= 2.1.11
time.sleep(0.1)
sys.stdout.write("Shutdown Notebook Server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y'):
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.instance().stop()
return
else:
print "No answer for 5s:",
print "resuming operation..."
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler) | confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows. | Below is the the instruction that describes the task:
### Input:
confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
### Response:
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
# FIXME: remove this delay when pyzmq dependency is >= 2.1.11
time.sleep(0.1)
sys.stdout.write("Shutdown Notebook Server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y'):
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.instance().stop()
return
else:
print "No answer for 5s:",
print "resuming operation..."
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler) |
def cache_warmup(strategy_name, *args, **kwargs):
"""
Warm up cache.
This task periodically hits charts to warm up the cache.
"""
logger.info('Loading strategy')
class_ = None
for class_ in strategies:
if class_.name == strategy_name:
break
else:
message = f'No strategy {strategy_name} found!'
logger.error(message)
return message
logger.info(f'Loading {class_.__name__}')
try:
strategy = class_(*args, **kwargs)
logger.info('Success!')
except TypeError:
message = 'Error loading strategy!'
logger.exception(message)
return message
results = {'success': [], 'errors': []}
for url in strategy.get_urls():
try:
logger.info(f'Fetching {url}')
requests.get(url)
results['success'].append(url)
except RequestException:
logger.exception('Error warming up cache!')
results['errors'].append(url)
return results | Warm up cache.
This task periodically hits charts to warm up the cache. | Below is the the instruction that describes the task:
### Input:
Warm up cache.
This task periodically hits charts to warm up the cache.
### Response:
def cache_warmup(strategy_name, *args, **kwargs):
"""
Warm up cache.
This task periodically hits charts to warm up the cache.
"""
logger.info('Loading strategy')
class_ = None
for class_ in strategies:
if class_.name == strategy_name:
break
else:
message = f'No strategy {strategy_name} found!'
logger.error(message)
return message
logger.info(f'Loading {class_.__name__}')
try:
strategy = class_(*args, **kwargs)
logger.info('Success!')
except TypeError:
message = 'Error loading strategy!'
logger.exception(message)
return message
results = {'success': [], 'errors': []}
for url in strategy.get_urls():
try:
logger.info(f'Fetching {url}')
requests.get(url)
results['success'].append(url)
except RequestException:
logger.exception('Error warming up cache!')
results['errors'].append(url)
return results |
def _remember_avatarness(
self, character, graph, node,
is_avatar=True, branch=None, turn=None,
tick=None
):
"""Use this to record a change in avatarness.
Should be called whenever a node that wasn't an avatar of a
character now is, and whenever a node that was an avatar of a
character now isn't.
``character`` is the one using the node as an avatar,
``graph`` is the character the node is in.
"""
branch = branch or self.branch
turn = turn or self.turn
tick = tick or self.tick
self._avatarness_cache.store(
character,
graph,
node,
branch,
turn,
tick,
is_avatar
)
self.query.avatar_set(
character,
graph,
node,
branch,
turn,
tick,
is_avatar
) | Use this to record a change in avatarness.
Should be called whenever a node that wasn't an avatar of a
character now is, and whenever a node that was an avatar of a
character now isn't.
``character`` is the one using the node as an avatar,
``graph`` is the character the node is in. | Below is the the instruction that describes the task:
### Input:
Use this to record a change in avatarness.
Should be called whenever a node that wasn't an avatar of a
character now is, and whenever a node that was an avatar of a
character now isn't.
``character`` is the one using the node as an avatar,
``graph`` is the character the node is in.
### Response:
def _remember_avatarness(
self, character, graph, node,
is_avatar=True, branch=None, turn=None,
tick=None
):
"""Use this to record a change in avatarness.
Should be called whenever a node that wasn't an avatar of a
character now is, and whenever a node that was an avatar of a
character now isn't.
``character`` is the one using the node as an avatar,
``graph`` is the character the node is in.
"""
branch = branch or self.branch
turn = turn or self.turn
tick = tick or self.tick
self._avatarness_cache.store(
character,
graph,
node,
branch,
turn,
tick,
is_avatar
)
self.query.avatar_set(
character,
graph,
node,
branch,
turn,
tick,
is_avatar
) |
def in_casapy (helper, asdm=None, ms=None):
"""This function is run inside the weirdo casapy IPython environment! A
strange set of modules is available, and the
`pwkit.environments.casa.scripting` system sets up a very particular
environment to allow encapsulated scripting.
"""
if asdm is None:
raise ValueError ('asdm')
if ms is None:
raise ValueError ('ms')
helper.casans.importasdm (
asdm = asdm,
vis = ms,
asis = 'Antenna Station Receiver Source CalAtmosphere CalWVR CorrelatorMode SBSummary',
bdfflags = True,
lazy = False,
process_caldevice = False,
) | This function is run inside the weirdo casapy IPython environment! A
strange set of modules is available, and the
`pwkit.environments.casa.scripting` system sets up a very particular
environment to allow encapsulated scripting. | Below is the the instruction that describes the task:
### Input:
This function is run inside the weirdo casapy IPython environment! A
strange set of modules is available, and the
`pwkit.environments.casa.scripting` system sets up a very particular
environment to allow encapsulated scripting.
### Response:
def in_casapy (helper, asdm=None, ms=None):
"""This function is run inside the weirdo casapy IPython environment! A
strange set of modules is available, and the
`pwkit.environments.casa.scripting` system sets up a very particular
environment to allow encapsulated scripting.
"""
if asdm is None:
raise ValueError ('asdm')
if ms is None:
raise ValueError ('ms')
helper.casans.importasdm (
asdm = asdm,
vis = ms,
asis = 'Antenna Station Receiver Source CalAtmosphere CalWVR CorrelatorMode SBSummary',
bdfflags = True,
lazy = False,
process_caldevice = False,
) |
def command(self, keycode):
'''
Presses a key.
Generates the actual key press on the device and prints the line in the script.
'''
self.device.press(keycode)
self.printOperation(None, Operation.PRESS, keycode) | Presses a key.
Generates the actual key press on the device and prints the line in the script. | Below is the the instruction that describes the task:
### Input:
Presses a key.
Generates the actual key press on the device and prints the line in the script.
### Response:
def command(self, keycode):
'''
Presses a key.
Generates the actual key press on the device and prints the line in the script.
'''
self.device.press(keycode)
self.printOperation(None, Operation.PRESS, keycode) |
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for beeing
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin | Add a plugin to the list of plugins and prepare it for beeing
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API. | Below is the the instruction that describes the task:
### Input:
Add a plugin to the list of plugins and prepare it for beeing
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
### Response:
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for beeing
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin |
def fit(self, X):
"""Fit the model.
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
None
"""
if isinstance(X, (pd.Series, pd.DataFrame)):
self.name = X.name
self.constant_value = self._get_constant_value(X)
if self.constant_value is None:
self.mean = np.mean(X)
self.std = np.std(X)
else:
self._replace_constant_methods()
self.fitted = True | Fit the model.
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
None | Below is the the instruction that describes the task:
### Input:
Fit the model.
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
None
### Response:
def fit(self, X):
"""Fit the model.
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
None
"""
if isinstance(X, (pd.Series, pd.DataFrame)):
self.name = X.name
self.constant_value = self._get_constant_value(X)
if self.constant_value is None:
self.mean = np.mean(X)
self.std = np.std(X)
else:
self._replace_constant_methods()
self.fitted = True |
def newest(self):
""" Gets the newest entry in the view, regardless of sort order """
if self._order_by == 'newest':
return self.first
if self._order_by == 'oldest':
return self.last
return max(self.entries, key=lambda x: (x.date, x.id)) | Gets the newest entry in the view, regardless of sort order | Below is the the instruction that describes the task:
### Input:
Gets the newest entry in the view, regardless of sort order
### Response:
def newest(self):
""" Gets the newest entry in the view, regardless of sort order """
if self._order_by == 'newest':
return self.first
if self._order_by == 'oldest':
return self.last
return max(self.entries, key=lambda x: (x.date, x.id)) |
def _fill(self):
"""Advance the iterator without returning the old head."""
prev = self._head
super(AutoApplyIterator, self)._fill()
if self._head is not None:
self.on_next(self._head, prev) | Advance the iterator without returning the old head. | Below is the the instruction that describes the task:
### Input:
Advance the iterator without returning the old head.
### Response:
def _fill(self):
"""Advance the iterator without returning the old head."""
prev = self._head
super(AutoApplyIterator, self)._fill()
if self._head is not None:
self.on_next(self._head, prev) |
def _on_event(self, conv_event):
"""Open conversation tab for new messages & pass events to notifier."""
conv = self._conv_list.get(conv_event.conversation_id)
user = conv.get_user(conv_event.user_id)
show_notification = all((
isinstance(conv_event, hangups.ChatMessageEvent),
not user.is_self,
not conv.is_quiet,
))
if show_notification:
self.add_conversation_tab(conv_event.conversation_id)
if self._discreet_notifications:
notification = DISCREET_NOTIFICATION
else:
notification = notifier.Notification(
user.full_name, get_conv_name(conv), conv_event.text
)
self._notifier.send(notification) | Open conversation tab for new messages & pass events to notifier. | Below is the the instruction that describes the task:
### Input:
Open conversation tab for new messages & pass events to notifier.
### Response:
def _on_event(self, conv_event):
"""Open conversation tab for new messages & pass events to notifier."""
conv = self._conv_list.get(conv_event.conversation_id)
user = conv.get_user(conv_event.user_id)
show_notification = all((
isinstance(conv_event, hangups.ChatMessageEvent),
not user.is_self,
not conv.is_quiet,
))
if show_notification:
self.add_conversation_tab(conv_event.conversation_id)
if self._discreet_notifications:
notification = DISCREET_NOTIFICATION
else:
notification = notifier.Notification(
user.full_name, get_conv_name(conv), conv_event.text
)
self._notifier.send(notification) |
def pdf(self, mu):
"""
PDF for Normal prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu)
"""
if self.transform is not None:
mu = self.transform(mu)
return (1.0/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2)) | PDF for Normal prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu) | Below is the the instruction that describes the task:
### Input:
PDF for Normal prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu)
### Response:
def pdf(self, mu):
"""
PDF for Normal prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu)
"""
if self.transform is not None:
mu = self.transform(mu)
return (1.0/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2)) |
def get_resource(remote):
"""Query CERN Resources to get user info and groups."""
cached_resource = session.pop('cern_resource', None)
if cached_resource:
return cached_resource
response = remote.get(REMOTE_APP_RESOURCE_API_URL)
dict_response = get_dict_from_response(response)
session['cern_resource'] = dict_response
return dict_response | Query CERN Resources to get user info and groups. | Below is the the instruction that describes the task:
### Input:
Query CERN Resources to get user info and groups.
### Response:
def get_resource(remote):
"""Query CERN Resources to get user info and groups."""
cached_resource = session.pop('cern_resource', None)
if cached_resource:
return cached_resource
response = remote.get(REMOTE_APP_RESOURCE_API_URL)
dict_response = get_dict_from_response(response)
session['cern_resource'] = dict_response
return dict_response |
def feed_forward_layers(inputs, outputs, connections):
"""
Collect the layers whose members can be evaluated in parallel in a feed-forward network.
:param inputs: list of the network input nodes
:param outputs: list of the output node identifiers
:param connections: list of (input, output) connections in the network.
Returns a list of layers, with each layer consisting of a set of node identifiers.
Note that the returned layers do not contain nodes whose output is ultimately
never used to compute the final network output.
"""
required = required_for_output(inputs, outputs, connections)
layers = []
s = set(inputs)
while 1:
# Find candidate nodes c for the next layer. These nodes should connect
# a node in s to a node not in s.
c = set(b for (a, b) in connections if a in s and b not in s)
# Keep only the used nodes whose entire input set is contained in s.
t = set()
for n in c:
if n in required and all(a in s for (a, b) in connections if b == n):
t.add(n)
if not t:
break
layers.append(t)
s = s.union(t)
return layers | Collect the layers whose members can be evaluated in parallel in a feed-forward network.
:param inputs: list of the network input nodes
:param outputs: list of the output node identifiers
:param connections: list of (input, output) connections in the network.
Returns a list of layers, with each layer consisting of a set of node identifiers.
Note that the returned layers do not contain nodes whose output is ultimately
never used to compute the final network output. | Below is the the instruction that describes the task:
### Input:
Collect the layers whose members can be evaluated in parallel in a feed-forward network.
:param inputs: list of the network input nodes
:param outputs: list of the output node identifiers
:param connections: list of (input, output) connections in the network.
Returns a list of layers, with each layer consisting of a set of node identifiers.
Note that the returned layers do not contain nodes whose output is ultimately
never used to compute the final network output.
### Response:
def feed_forward_layers(inputs, outputs, connections):
"""
Collect the layers whose members can be evaluated in parallel in a feed-forward network.
:param inputs: list of the network input nodes
:param outputs: list of the output node identifiers
:param connections: list of (input, output) connections in the network.
Returns a list of layers, with each layer consisting of a set of node identifiers.
Note that the returned layers do not contain nodes whose output is ultimately
never used to compute the final network output.
"""
required = required_for_output(inputs, outputs, connections)
layers = []
s = set(inputs)
while 1:
# Find candidate nodes c for the next layer. These nodes should connect
# a node in s to a node not in s.
c = set(b for (a, b) in connections if a in s and b not in s)
# Keep only the used nodes whose entire input set is contained in s.
t = set()
for n in c:
if n in required and all(a in s for (a, b) in connections if b == n):
t.add(n)
if not t:
break
layers.append(t)
s = s.union(t)
return layers |
def maybe_convert_platform_interval(values):
"""
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is not
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif is_categorical_dtype(values):
values = np.asarray(values)
return maybe_convert_platform(values) | Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array | Below is the the instruction that describes the task:
### Input:
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
### Response:
def maybe_convert_platform_interval(values):
"""
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is not
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif is_categorical_dtype(values):
values = np.asarray(values)
return maybe_convert_platform(values) |
def init(self):
"""
To be overridden to initialize the datasets needed by the calculation
"""
oq = self.oqparam
if not oq.risk_imtls:
if self.datastore.parent:
oq.risk_imtls = (
self.datastore.parent['oqparam'].risk_imtls)
if 'precalc' in vars(self):
self.rlzs_assoc = self.precalc.rlzs_assoc
elif 'csm_info' in self.datastore:
csm_info = self.datastore['csm_info']
if oq.hazard_calculation_id and 'gsim_logic_tree' in oq.inputs:
# redefine the realizations by reading the weights from the
# gsim_logic_tree_file that could be different from the parent
csm_info.gsim_lt = logictree.GsimLogicTree(
oq.inputs['gsim_logic_tree'], set(csm_info.trts))
self.rlzs_assoc = csm_info.get_rlzs_assoc()
elif hasattr(self, 'csm'):
self.check_floating_spinning()
self.rlzs_assoc = self.csm.info.get_rlzs_assoc()
else: # build a fake; used by risk-from-file calculators
self.datastore['csm_info'] = fake = source.CompositionInfo.fake()
self.rlzs_assoc = fake.get_rlzs_assoc() | To be overridden to initialize the datasets needed by the calculation | Below is the the instruction that describes the task:
### Input:
To be overridden to initialize the datasets needed by the calculation
### Response:
def init(self):
"""
To be overridden to initialize the datasets needed by the calculation
"""
oq = self.oqparam
if not oq.risk_imtls:
if self.datastore.parent:
oq.risk_imtls = (
self.datastore.parent['oqparam'].risk_imtls)
if 'precalc' in vars(self):
self.rlzs_assoc = self.precalc.rlzs_assoc
elif 'csm_info' in self.datastore:
csm_info = self.datastore['csm_info']
if oq.hazard_calculation_id and 'gsim_logic_tree' in oq.inputs:
# redefine the realizations by reading the weights from the
# gsim_logic_tree_file that could be different from the parent
csm_info.gsim_lt = logictree.GsimLogicTree(
oq.inputs['gsim_logic_tree'], set(csm_info.trts))
self.rlzs_assoc = csm_info.get_rlzs_assoc()
elif hasattr(self, 'csm'):
self.check_floating_spinning()
self.rlzs_assoc = self.csm.info.get_rlzs_assoc()
else: # build a fake; used by risk-from-file calculators
self.datastore['csm_info'] = fake = source.CompositionInfo.fake()
self.rlzs_assoc = fake.get_rlzs_assoc() |
def _set_slave_timeout(self, dpid, port, timeout):
"""set the timeout time at some port of some datapath."""
slave = self._get_slave(dpid, port)
if slave:
slave['timeout'] = timeout | set the timeout time at some port of some datapath. | Below is the the instruction that describes the task:
### Input:
set the timeout time at some port of some datapath.
### Response:
def _set_slave_timeout(self, dpid, port, timeout):
"""set the timeout time at some port of some datapath."""
slave = self._get_slave(dpid, port)
if slave:
slave['timeout'] = timeout |
def replace_variable(self, variable):
"""
Replaces an existing OpenFisca variable in the tax and benefit system by a new one.
The new variable must have the same name than the replaced one.
If no variable with the given name exists in the tax and benefit system, no error will be raised and the new variable will be simply added.
:param Variable variable: New variable to add. Must be a subclass of Variable.
"""
name = variable.__name__
if self.variables.get(name) is not None:
del self.variables[name]
self.load_variable(variable, update = False) | Replaces an existing OpenFisca variable in the tax and benefit system by a new one.
The new variable must have the same name than the replaced one.
If no variable with the given name exists in the tax and benefit system, no error will be raised and the new variable will be simply added.
:param Variable variable: New variable to add. Must be a subclass of Variable. | Below is the the instruction that describes the task:
### Input:
Replaces an existing OpenFisca variable in the tax and benefit system by a new one.
The new variable must have the same name than the replaced one.
If no variable with the given name exists in the tax and benefit system, no error will be raised and the new variable will be simply added.
:param Variable variable: New variable to add. Must be a subclass of Variable.
### Response:
def replace_variable(self, variable):
"""
Replaces an existing OpenFisca variable in the tax and benefit system by a new one.
The new variable must have the same name than the replaced one.
If no variable with the given name exists in the tax and benefit system, no error will be raised and the new variable will be simply added.
:param Variable variable: New variable to add. Must be a subclass of Variable.
"""
name = variable.__name__
if self.variables.get(name) is not None:
del self.variables[name]
self.load_variable(variable, update = False) |
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False):
"""
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
"""
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name="{}_conv".format(name))
if use_batchnorm:
conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
relu = mx.symbol.Activation(data=conv, act_type=act_type, \
name="{}_{}".format(name, act_type))
return relu | wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols | Below is the the instruction that describes the task:
### Input:
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
### Response:
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False):
"""
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
"""
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name="{}_conv".format(name))
if use_batchnorm:
conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
relu = mx.symbol.Activation(data=conv, act_type=act_type, \
name="{}_{}".format(name, act_type))
return relu |
def can_lookup_objective_prerequisites(self):
"""Tests if this user can perform Objective lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a PermissionDenied. This is intended as a
hint to an application that may opt not to offer lookup
operations to unauthorized users.
return: (boolean) - false if lookup methods are not authorized,
true otherwise
compliance: mandatory - This method must be implemented.
"""
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['objectiveRequisiteHints']['canLookup'] | Tests if this user can perform Objective lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a PermissionDenied. This is intended as a
hint to an application that may opt not to offer lookup
operations to unauthorized users.
return: (boolean) - false if lookup methods are not authorized,
true otherwise
compliance: mandatory - This method must be implemented. | Below is the the instruction that describes the task:
### Input:
Tests if this user can perform Objective lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a PermissionDenied. This is intended as a
hint to an application that may opt not to offer lookup
operations to unauthorized users.
return: (boolean) - false if lookup methods are not authorized,
true otherwise
compliance: mandatory - This method must be implemented.
### Response:
def can_lookup_objective_prerequisites(self):
"""Tests if this user can perform Objective lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a PermissionDenied. This is intended as a
hint to an application that may opt not to offer lookup
operations to unauthorized users.
return: (boolean) - false if lookup methods are not authorized,
true otherwise
compliance: mandatory - This method must be implemented.
"""
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['objectiveRequisiteHints']['canLookup'] |
def jiggle_text_file(self): # type: () -> int
"""
Update ver strings in a non-python, ordinary text file in root of package (next to setup.py).
:return:
"""
changed = 0
files_to_update = []
for version_txt in self.file_inventory.text_files:
if os.path.isfile(version_txt):
files_to_update.append(version_txt)
if not files_to_update and self.create_configs:
files_to_update = self.file_inventory.default_text_file
for version_txt in files_to_update:
if os.path.isfile(version_txt):
with io.open(version_txt, "w", encoding="utf-8") as outfile:
if self.version is None or self.version == "":
raise JiggleVersionException("Can't write version")
outfile.writelines([unicode(self.version_to_write())])
outfile.close()
changed += 1
return changed | Update ver strings in a non-python, ordinary text file in root of package (next to setup.py).
:return: | Below is the the instruction that describes the task:
### Input:
Update ver strings in a non-python, ordinary text file in root of package (next to setup.py).
:return:
### Response:
def jiggle_text_file(self): # type: () -> int
"""
Update ver strings in a non-python, ordinary text file in root of package (next to setup.py).
:return:
"""
changed = 0
files_to_update = []
for version_txt in self.file_inventory.text_files:
if os.path.isfile(version_txt):
files_to_update.append(version_txt)
if not files_to_update and self.create_configs:
files_to_update = self.file_inventory.default_text_file
for version_txt in files_to_update:
if os.path.isfile(version_txt):
with io.open(version_txt, "w", encoding="utf-8") as outfile:
if self.version is None or self.version == "":
raise JiggleVersionException("Can't write version")
outfile.writelines([unicode(self.version_to_write())])
outfile.close()
changed += 1
return changed |
def union(self, x, y):
"""Merges part that contain x and part containing y
:returns: False if x, y are already in same part
:complexity: O(inverse_ackerman(n))
"""
repr_x = self.find(x)
repr_y = self.find(y)
if repr_x == repr_y: # already in the same component
return False
if self.rank[repr_x] == self.rank[repr_y]:
self.rank[repr_x] += 1
self.up[repr_y] = repr_x
elif self.rank[repr_x] > self.rank[repr_y]:
self.up[repr_y] = repr_x
else:
self.up[repr_x] = repr_y
return True | Merges part that contain x and part containing y
:returns: False if x, y are already in same part
:complexity: O(inverse_ackerman(n)) | Below is the the instruction that describes the task:
### Input:
Merges part that contain x and part containing y
:returns: False if x, y are already in same part
:complexity: O(inverse_ackerman(n))
### Response:
def union(self, x, y):
"""Merges part that contain x and part containing y
:returns: False if x, y are already in same part
:complexity: O(inverse_ackerman(n))
"""
repr_x = self.find(x)
repr_y = self.find(y)
if repr_x == repr_y: # already in the same component
return False
if self.rank[repr_x] == self.rank[repr_y]:
self.rank[repr_x] += 1
self.up[repr_y] = repr_x
elif self.rank[repr_x] > self.rank[repr_y]:
self.up[repr_y] = repr_x
else:
self.up[repr_x] = repr_y
return True |
def _nelec(self):
""" Particles per unit lorentz factor
"""
pd = self.particle_distribution(self._gam * mec2)
return pd.to(1 / mec2_unit).value | Particles per unit lorentz factor | Below is the the instruction that describes the task:
### Input:
Particles per unit lorentz factor
### Response:
def _nelec(self):
    """ Particles per unit lorentz factor

    Evaluates the particle distribution at energies ``self._gam * mec2``
    and strips units, returning the bare value array.
    """
    # NOTE(review): assumes module-level `mec2` / `mec2_unit` are the electron
    # rest energy and its unit, so `pd` comes out as 1/energy -- confirm
    # against the module header.
    pd = self.particle_distribution(self._gam * mec2)
    return pd.to(1 / mec2_unit).value |
def start_plugins(conf, watcher_plugin_class, health_plugin_class,
sleep_time):
"""
Start the working threads:
- Health monitor (the health plugin)
- Config change monitor (the watcher plugin)
"""
# No matter what the chosen plugin to watch for config updates: We get a
# plugin-handle back. This gives us a start(), stop() and
# get_route_spec_queue() function. All watcher plugins provide the same
# interface.
watcher_plugin = watcher_plugin_class(conf)
watcher_plugin.start()
# Similarly for the health-monitor-plugin. It gives us a get_queues()
# function, to get the monitor-ips and failed-ips queues.
health_plugin = health_plugin_class(conf)
health_plugin.start()
return watcher_plugin, health_plugin | Start the working threads:
- Health monitor (the health plugin)
- Config change monitor (the watcher plugin) | Below is the the instruction that describes the task:
### Input:
Start the working threads:
- Health monitor (the health plugin)
- Config change monitor (the watcher plugin)
### Response:
def start_plugins(conf, watcher_plugin_class, health_plugin_class,
sleep_time):
"""
Start the working threads:
- Health monitor (the health plugin)
- Config change monitor (the watcher plugin)
"""
# No matter what the chosen plugin to watch for config updates: We get a
# plugin-handle back. This gives us a start(), stop() and
# get_route_spec_queue() function. All watcher plugins provide the same
# interface.
watcher_plugin = watcher_plugin_class(conf)
watcher_plugin.start()
# Similarly for the health-monitor-plugin. It gives us a get_queues()
# function, to get the monitor-ips and failed-ips queues.
health_plugin = health_plugin_class(conf)
health_plugin.start()
return watcher_plugin, health_plugin |
def add_log(self, x, flag_also_show=False):
"""Delegates to parent form"""
self.parent_form.add_log(x, flag_also_show) | Delegates to parent form | Below is the the instruction that describes the task:
### Input:
Delegates to parent form
### Response:
def add_log(self, x, flag_also_show=False):
    """Delegates to parent form

    Forwards the log entry `x` (and the show flag) unchanged to
    ``self.parent_form.add_log``; this widget keeps no log of its own.
    """
    self.parent_form.add_log(x, flag_also_show) |
def err(*output, **kwargs):
"""Writes output to stderr.
:arg wrap: If you set ``wrap=False``, then ``err`` won't textwrap
the output.
"""
output = 'Error: ' + ' '.join([str(o) for o in output])
if kwargs.get('wrap') is not False:
output = '\n'.join(wrap(output, kwargs.get('indent', '')))
elif kwargs.get('indent'):
indent = kwargs['indent']
output = indent + ('\n' + indent).join(output.splitlines())
sys.stderr.write(output + '\n') | Writes output to stderr.
:arg wrap: If you set ``wrap=False``, then ``err`` won't textwrap
the output. | Below is the the instruction that describes the task:
### Input:
Writes output to stderr.
:arg wrap: If you set ``wrap=False``, then ``err`` won't textwrap
the output.
### Response:
def err(*output, **kwargs):
"""Writes output to stderr.
:arg wrap: If you set ``wrap=False``, then ``err`` won't textwrap
the output.
"""
output = 'Error: ' + ' '.join([str(o) for o in output])
if kwargs.get('wrap') is not False:
output = '\n'.join(wrap(output, kwargs.get('indent', '')))
elif kwargs.get('indent'):
indent = kwargs['indent']
output = indent + ('\n' + indent).join(output.splitlines())
sys.stderr.write(output + '\n') |
def get(self, split_id: str) -> Split:
""" load transaction by id """
query = (
self.query
.filter(Split.guid == split_id)
)
return query.one() | load transaction by id | Below is the the instruction that describes the task:
### Input:
load transaction by id
### Response:
def get(self, split_id: str) -> Split:
""" load transaction by id """
query = (
self.query
.filter(Split.guid == split_id)
)
return query.one() |
def part_name(self, basename='/tmp/sitemap.xml', part_number=0):
"""Name (file or URI) for one component sitemap.
Works for both filenames and URIs because manipulates only the end
of the string.
Abstracting this into a function that starts from the basename to get
prefix and suffix each time seems a bit wasteful but perhaps not worth
worrying about. Allows same code to be used for the write() and
as_xml_index() cases.
"""
# Work out how to name the sitemaps, attempt to add %05d before
# ".xml$", else append
sitemap_prefix = basename
sitemap_suffix = '.xml'
if (basename[-4:] == '.xml'):
sitemap_prefix = basename[:-4]
return(sitemap_prefix + ("%05d" % (part_number)) + sitemap_suffix) | Name (file or URI) for one component sitemap.
Works for both filenames and URIs because manipulates only the end
of the string.
Abstracting this into a function that starts from the basename to get
prefix and suffix each time seems a bit wasteful but perhaps not worth
worrying about. Allows same code to be used for the write() and
as_xml_index() cases. | Below is the the instruction that describes the task:
### Input:
Name (file or URI) for one component sitemap.
Works for both filenames and URIs because manipulates only the end
of the string.
Abstracting this into a function that starts from the basename to get
prefix and suffix each time seems a bit wasteful but perhaps not worth
worrying about. Allows same code to be used for the write() and
as_xml_index() cases.
### Response:
def part_name(self, basename='/tmp/sitemap.xml', part_number=0):
"""Name (file or URI) for one component sitemap.
Works for both filenames and URIs because manipulates only the end
of the string.
Abstracting this into a function that starts from the basename to get
prefix and suffix each time seems a bit wasteful but perhaps not worth
worrying about. Allows same code to be used for the write() and
as_xml_index() cases.
"""
# Work out how to name the sitemaps, attempt to add %05d before
# ".xml$", else append
sitemap_prefix = basename
sitemap_suffix = '.xml'
if (basename[-4:] == '.xml'):
sitemap_prefix = basename[:-4]
return(sitemap_prefix + ("%05d" % (part_number)) + sitemap_suffix) |
def parse_stat(self, buf):
"""
`buf` is a readable file-like object
returns a tuple: (metrics, tags, mode, version)
"""
metrics = []
buf.seek(0)
# Check the version line to make sure we parse the rest of the
# body correctly. Particularly, the Connections val was added in
# >= 3.4.4.
start_line = buf.readline()
match = self.version_pattern.search(start_line)
if match is None:
return (None, None, "inactive", None)
raise Exception("Could not parse version from stat command output: %s" % start_line)
else:
version = match.group()
has_connections_val = LooseVersion(version) > LooseVersion("3.4.4")
# Clients:
buf.readline() # skip the Clients: header
connections = 0
client_line = buf.readline().strip()
if client_line:
connections += 1
while client_line:
client_line = buf.readline().strip()
if client_line:
connections += 1
# Latency min/avg/max: -10/0/20007
_, value = buf.readline().split(':')
l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
metrics.append(ZKMetric('zookeeper.latency.min', l_min))
metrics.append(ZKMetric('zookeeper.latency.avg', l_avg))
metrics.append(ZKMetric('zookeeper.latency.max', l_max))
# Received: 101032173
_, value = buf.readline().split(':')
# Fixme: This metric name is wrong. It should be removed in a major version of the agent
# See https://github.com/DataDog/integrations-core/issues/816
metrics.append(ZKMetric('zookeeper.bytes_received', long(value.strip())))
metrics.append(ZKMetric('zookeeper.packets.received', long(value.strip()), "rate"))
# Sent: 1324
_, value = buf.readline().split(':')
# Fixme: This metric name is wrong. It should be removed in a major version of the agent
# See https://github.com/DataDog/integrations-core/issues/816
metrics.append(ZKMetric('zookeeper.bytes_sent', long(value.strip())))
metrics.append(ZKMetric('zookeeper.packets.sent', long(value.strip()), "rate"))
if has_connections_val:
# Connections: 1
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.connections', int(value.strip())))
else:
# If the zk version doesnt explicitly give the Connections val,
# use the value we computed from the client list.
metrics.append(ZKMetric('zookeeper.connections', connections))
# Outstanding: 0
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.outstanding_requests', long(value.strip())))
# Zxid: 0x1034799c7
_, value = buf.readline().split(':')
# Parse as a 64 bit hex int
zxid = long(value.strip(), 16)
# convert to bytes
zxid_bytes = struct.pack('>q', zxid)
# the higher order 4 bytes is the epoch
(zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])
# the lower order 4 bytes is the count
(zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])
metrics.append(ZKMetric('zookeeper.zxid.epoch', zxid_epoch))
metrics.append(ZKMetric('zookeeper.zxid.count', zxid_count))
# Mode: leader
_, value = buf.readline().split(':')
mode = value.strip().lower()
tags = [u'mode:' + mode]
# Node count: 487
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.nodes', long(value.strip())))
return metrics, tags, mode, version | `buf` is a readable file-like object
returns a tuple: (metrics, tags, mode, version) | Below is the the instruction that describes the task:
### Input:
`buf` is a readable file-like object
returns a tuple: (metrics, tags, mode, version)
### Response:
def parse_stat(self, buf):
"""
`buf` is a readable file-like object
returns a tuple: (metrics, tags, mode, version)
"""
metrics = []
buf.seek(0)
# Check the version line to make sure we parse the rest of the
# body correctly. Particularly, the Connections val was added in
# >= 3.4.4.
start_line = buf.readline()
match = self.version_pattern.search(start_line)
if match is None:
return (None, None, "inactive", None)
raise Exception("Could not parse version from stat command output: %s" % start_line)
else:
version = match.group()
has_connections_val = LooseVersion(version) > LooseVersion("3.4.4")
# Clients:
buf.readline() # skip the Clients: header
connections = 0
client_line = buf.readline().strip()
if client_line:
connections += 1
while client_line:
client_line = buf.readline().strip()
if client_line:
connections += 1
# Latency min/avg/max: -10/0/20007
_, value = buf.readline().split(':')
l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
metrics.append(ZKMetric('zookeeper.latency.min', l_min))
metrics.append(ZKMetric('zookeeper.latency.avg', l_avg))
metrics.append(ZKMetric('zookeeper.latency.max', l_max))
# Received: 101032173
_, value = buf.readline().split(':')
# Fixme: This metric name is wrong. It should be removed in a major version of the agent
# See https://github.com/DataDog/integrations-core/issues/816
metrics.append(ZKMetric('zookeeper.bytes_received', long(value.strip())))
metrics.append(ZKMetric('zookeeper.packets.received', long(value.strip()), "rate"))
# Sent: 1324
_, value = buf.readline().split(':')
# Fixme: This metric name is wrong. It should be removed in a major version of the agent
# See https://github.com/DataDog/integrations-core/issues/816
metrics.append(ZKMetric('zookeeper.bytes_sent', long(value.strip())))
metrics.append(ZKMetric('zookeeper.packets.sent', long(value.strip()), "rate"))
if has_connections_val:
# Connections: 1
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.connections', int(value.strip())))
else:
# If the zk version doesnt explicitly give the Connections val,
# use the value we computed from the client list.
metrics.append(ZKMetric('zookeeper.connections', connections))
# Outstanding: 0
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.outstanding_requests', long(value.strip())))
# Zxid: 0x1034799c7
_, value = buf.readline().split(':')
# Parse as a 64 bit hex int
zxid = long(value.strip(), 16)
# convert to bytes
zxid_bytes = struct.pack('>q', zxid)
# the higher order 4 bytes is the epoch
(zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])
# the lower order 4 bytes is the count
(zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])
metrics.append(ZKMetric('zookeeper.zxid.epoch', zxid_epoch))
metrics.append(ZKMetric('zookeeper.zxid.count', zxid_count))
# Mode: leader
_, value = buf.readline().split(':')
mode = value.strip().lower()
tags = [u'mode:' + mode]
# Node count: 487
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.nodes', long(value.strip())))
return metrics, tags, mode, version |
def unix_time(self, end_datetime=None, start_datetime=None):
"""
Get a timestamp between January 1, 1970 and now, unless passed
explicit start_datetime or end_datetime values.
:example 1061306726
"""
start_datetime = self._parse_start_datetime(start_datetime)
end_datetime = self._parse_end_datetime(end_datetime)
return self.generator.random.randint(start_datetime, end_datetime) | Get a timestamp between January 1, 1970 and now, unless passed
explicit start_datetime or end_datetime values.
:example 1061306726 | Below is the the instruction that describes the task:
### Input:
Get a timestamp between January 1, 1970 and now, unless passed
explicit start_datetime or end_datetime values.
:example 1061306726
### Response:
def unix_time(self, end_datetime=None, start_datetime=None):
"""
Get a timestamp between January 1, 1970 and now, unless passed
explicit start_datetime or end_datetime values.
:example 1061306726
"""
start_datetime = self._parse_start_datetime(start_datetime)
end_datetime = self._parse_end_datetime(end_datetime)
return self.generator.random.randint(start_datetime, end_datetime) |
def parseConfigFile(configFile=None):
"""Returns a configparser.SafeConfigParser instance with configs
read from the config file. Default location of the config file is
at ~/.wakatime.cfg.
"""
# get config file location from ENV
if not configFile:
configFile = getConfigFile()
configs = configparser.ConfigParser(delimiters=('='), strict=False)
try:
with open(configFile, 'r', encoding='utf-8') as fh:
try:
configs.read_file(fh)
except configparser.Error:
print(traceback.format_exc())
raise SystemExit(CONFIG_FILE_PARSE_ERROR)
except IOError:
pass
return configs | Returns a configparser.SafeConfigParser instance with configs
read from the config file. Default location of the config file is
at ~/.wakatime.cfg. | Below is the the instruction that describes the task:
### Input:
Returns a configparser.SafeConfigParser instance with configs
read from the config file. Default location of the config file is
at ~/.wakatime.cfg.
### Response:
def parseConfigFile(configFile=None):
    """Returns a configparser.SafeConfigParser instance with configs
    read from the config file. Default location of the config file is
    at ~/.wakatime.cfg.
    """
    # get config file location from ENV
    if not configFile:
        configFile = getConfigFile()

    # '=' is the only accepted key/value delimiter; strict=False tolerates
    # duplicate sections and options instead of raising
    configs = configparser.ConfigParser(delimiters=('='), strict=False)
    try:
        with open(configFile, 'r', encoding='utf-8') as fh:
            try:
                configs.read_file(fh)
            except configparser.Error:
                # malformed config is fatal: report the traceback and exit
                print(traceback.format_exc())
                raise SystemExit(CONFIG_FILE_PARSE_ERROR)
    except IOError:
        # a missing or unreadable config file is not an error: an empty
        # parser is returned instead
        pass
    return configs |
def move_page_bottom(self):
"""
Move the cursor to the last item on the page.
"""
self.nav.page_index = self.content.range[1]
self.nav.cursor_index = 0
self.nav.inverted = True | Move the cursor to the last item on the page. | Below is the the instruction that describes the task:
### Input:
Move the cursor to the last item on the page.
### Response:
def move_page_bottom(self):
    """
    Move the cursor to the last item on the page.
    """
    # anchor the page at the end of the content range
    self.nav.page_index = self.content.range[1]
    self.nav.cursor_index = 0
    # NOTE(review): `inverted` presumably makes cursor_index 0 count from
    # the bottom of the page -- confirm against the nav implementation.
    self.nav.inverted = True |
def Save(session, filename=None):
"""
save your session to use it later.
Returns the filename of the written file.
If not filename is given, a file named `androguard_session_<DATE>.ag` will
be created in the current working directory.
`<DATE>` is a timestamp with the following format: `%Y-%m-%d_%H%M%S`.
This function will overwrite existing files without asking.
If the file could not written, None is returned.
example::
s = session.Session()
session.Save(s, "msession.ag")
:param session: A Session object to save
:param filename: output filename to save the session
:type filename: string
"""
if not filename:
filename = "androguard_session_{:%Y-%m-%d_%H%M%S}.ag".format(datetime.datetime.now())
if os.path.isfile(filename):
log.warning("{} already exists, overwriting!")
# Setting the recursion limit according to the documentation:
# https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled
#
# Some larger APKs require a high recursion limit.
# Tested to be above 35000 for some files, setting to 50k to be sure.
# You might want to set this even higher if you encounter problems
reclimit = sys.getrecursionlimit()
sys.setrecursionlimit(50000)
saved = False
try:
with open(filename, "wb") as fd:
pickle.dump(session, fd)
saved = True
except RecursionError:
log.exception("Recursion Limit hit while saving. "
"Current Recursion limit: {}. "
"Please report this error!".format(sys.getrecursionlimit()))
# Remove partially written file
os.unlink(filename)
sys.setrecursionlimit(reclimit)
return filename if saved else None | save your session to use it later.
Returns the filename of the written file.
If not filename is given, a file named `androguard_session_<DATE>.ag` will
be created in the current working directory.
`<DATE>` is a timestamp with the following format: `%Y-%m-%d_%H%M%S`.
This function will overwrite existing files without asking.
If the file could not written, None is returned.
example::
s = session.Session()
session.Save(s, "msession.ag")
:param session: A Session object to save
:param filename: output filename to save the session
:type filename: string | Below is the the instruction that describes the task:
### Input:
save your session to use it later.
Returns the filename of the written file.
If not filename is given, a file named `androguard_session_<DATE>.ag` will
be created in the current working directory.
`<DATE>` is a timestamp with the following format: `%Y-%m-%d_%H%M%S`.
This function will overwrite existing files without asking.
If the file could not written, None is returned.
example::
s = session.Session()
session.Save(s, "msession.ag")
:param session: A Session object to save
:param filename: output filename to save the session
:type filename: string
### Response:
def Save(session, filename=None):
"""
save your session to use it later.
Returns the filename of the written file.
If not filename is given, a file named `androguard_session_<DATE>.ag` will
be created in the current working directory.
`<DATE>` is a timestamp with the following format: `%Y-%m-%d_%H%M%S`.
This function will overwrite existing files without asking.
If the file could not written, None is returned.
example::
s = session.Session()
session.Save(s, "msession.ag")
:param session: A Session object to save
:param filename: output filename to save the session
:type filename: string
"""
if not filename:
filename = "androguard_session_{:%Y-%m-%d_%H%M%S}.ag".format(datetime.datetime.now())
if os.path.isfile(filename):
log.warning("{} already exists, overwriting!")
# Setting the recursion limit according to the documentation:
# https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled
#
# Some larger APKs require a high recursion limit.
# Tested to be above 35000 for some files, setting to 50k to be sure.
# You might want to set this even higher if you encounter problems
reclimit = sys.getrecursionlimit()
sys.setrecursionlimit(50000)
saved = False
try:
with open(filename, "wb") as fd:
pickle.dump(session, fd)
saved = True
except RecursionError:
log.exception("Recursion Limit hit while saving. "
"Current Recursion limit: {}. "
"Please report this error!".format(sys.getrecursionlimit()))
# Remove partially written file
os.unlink(filename)
sys.setrecursionlimit(reclimit)
return filename if saved else None |
def localize(self):
"""
Check if this module was saved as a resource. If it was, return a new module descriptor
that points to a local copy of that resource. Should only be called on a worker node. On
the leader, this method returns this resource, i.e. self.
:rtype: toil.resource.Resource
"""
if not self._runningOnWorker():
log.warn('The localize() method should only be invoked on a worker.')
resource = Resource.lookup(self._resourcePath)
if resource is None:
return self
else:
def stash(tmpDirPath):
# Save the original dirPath such that we can restore it in globalize()
with open(os.path.join(tmpDirPath, '.stash'), 'w') as f:
f.write('1' if self.fromVirtualEnv else '0')
f.write(self.dirPath)
resource.download(callback=stash)
return self.__class__(dirPath=resource.localDirPath,
name=self.name,
fromVirtualEnv=self.fromVirtualEnv) | Check if this module was saved as a resource. If it was, return a new module descriptor
that points to a local copy of that resource. Should only be called on a worker node. On
the leader, this method returns this resource, i.e. self.
:rtype: toil.resource.Resource | Below is the the instruction that describes the task:
### Input:
Check if this module was saved as a resource. If it was, return a new module descriptor
that points to a local copy of that resource. Should only be called on a worker node. On
the leader, this method returns this resource, i.e. self.
:rtype: toil.resource.Resource
### Response:
def localize(self):
    """
    Check if this module was saved as a resource. If it was, return a new module descriptor
    that points to a local copy of that resource. Should only be called on a worker node. On
    the leader, this method returns this resource, i.e. self.

    :rtype: toil.resource.Resource
    """
    if not self._runningOnWorker():
        log.warn('The localize() method should only be invoked on a worker.')
    resource = Resource.lookup(self._resourcePath)
    if resource is None:
        # not registered as a resource: nothing to localize, return unchanged
        return self
    else:
        def stash(tmpDirPath):
            # Save the original dirPath such that we can restore it in globalize()
            # NOTE(review): format is a single '0'/'1' flag character followed
            # immediately by the path, no separator -- globalize() must parse
            # the stash file with exactly this layout; confirm.
            with open(os.path.join(tmpDirPath, '.stash'), 'w') as f:
                f.write('1' if self.fromVirtualEnv else '0')
                f.write(self.dirPath)
        # download the resource locally, writing the stash marker alongside it
        resource.download(callback=stash)
        # rebuild the descriptor pointing at the local copy
        return self.__class__(dirPath=resource.localDirPath,
                              name=self.name,
                              fromVirtualEnv=self.fromVirtualEnv) |
def Network_setCookie(self, name, value, **kwargs):
	"""
	Function path: Network.setCookie
	Domain: Network
	Method name: setCookie

	WARNING: This function is marked 'Experimental'!

	Parameters:
		Required arguments:
			'name' (type: string) -> Cookie name.
			'value' (type: string) -> Cookie value.
		Optional arguments:
			'url' (type: string) -> The request-URI to associate with the setting of the cookie. This value can affect the default domain and path values of the created cookie.
			'domain' (type: string) -> Cookie domain.
			'path' (type: string) -> Cookie path.
			'secure' (type: boolean) -> True if cookie is secure.
			'httpOnly' (type: boolean) -> True if cookie is http-only.
			'sameSite' (type: CookieSameSite) -> Cookie SameSite type.
			'expires' (type: TimeSinceEpoch) -> Cookie expiration date, session cookie if not set
	Returns:
		'success' (type: boolean) -> True if successfully set cookie.

	Description: Sets a cookie with the given cookie data; may overwrite equivalent cookies if they exist.
	"""
	# Generated type guards: both required protocol arguments are strings.
	assert isinstance(name, (str,)
		), "Argument 'name' must be of type '['str']'. Received type: '%s'" % type(
		name)
	assert isinstance(value, (str,)
		), "Argument 'value' must be of type '['str']'. Received type: '%s'" % type(
		value)
	# Optional arguments are type-checked only when supplied; 'sameSite' and
	# 'expires' use protocol-declared types and are accepted as given.
	if 'url' in kwargs:
		assert isinstance(kwargs['url'], (str,)
			), "Optional argument 'url' must be of type '['str']'. Received type: '%s'" % type(
			kwargs['url'])
	if 'domain' in kwargs:
		assert isinstance(kwargs['domain'], (str,)
			), "Optional argument 'domain' must be of type '['str']'. Received type: '%s'" % type(
			kwargs['domain'])
	if 'path' in kwargs:
		assert isinstance(kwargs['path'], (str,)
			), "Optional argument 'path' must be of type '['str']'. Received type: '%s'" % type(
			kwargs['path'])
	if 'secure' in kwargs:
		assert isinstance(kwargs['secure'], (bool,)
			), "Optional argument 'secure' must be of type '['bool']'. Received type: '%s'" % type(
			kwargs['secure'])
	if 'httpOnly' in kwargs:
		assert isinstance(kwargs['httpOnly'], (bool,)
			), "Optional argument 'httpOnly' must be of type '['bool']'. Received type: '%s'" % type(
			kwargs['httpOnly'])
	# Reject any keyword argument the protocol command does not define.
	expected = ['url', 'domain', 'path', 'secure', 'httpOnly', 'sameSite',
		'expires']
	passed_keys = list(kwargs.keys())
	assert all([(key in expected) for key in passed_keys]
		), "Allowed kwargs are ['url', 'domain', 'path', 'secure', 'httpOnly', 'sameSite', 'expires']. Passed kwargs: %s" % passed_keys
	# Dispatch the command synchronously over the debugger connection.
	subdom_funcs = self.synchronous_command('Network.setCookie', name=name,
		value=value, **kwargs)
	# NOTE(review): the " | Function path: ..." tail on the next line is the
	# dataset's docstring column fused onto this row's code column (see the
	# file's "code | docstring | text" schema); it is not Python code.
	return subdom_funcs | Function path: Network.setCookie
Domain: Network
Method name: setCookie
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'name' (type: string) -> Cookie name.
'value' (type: string) -> Cookie value.
Optional arguments:
'url' (type: string) -> The request-URI to associate with the setting of the cookie. This value can affect the default domain and path values of the created cookie.
'domain' (type: string) -> Cookie domain.
'path' (type: string) -> Cookie path.
'secure' (type: boolean) -> True if cookie is secure.
'httpOnly' (type: boolean) -> True if cookie is http-only.
'sameSite' (type: CookieSameSite) -> Cookie SameSite type.
'expires' (type: TimeSinceEpoch) -> Cookie expiration date, session cookie if not set
Returns:
'success' (type: boolean) -> True if successfully set cookie.
Description: Sets a cookie with the given cookie data; may overwrite equivalent cookies if they exist. | Below is the instruction that describes the task:
### Input:
Function path: Network.setCookie
Domain: Network
Method name: setCookie
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'name' (type: string) -> Cookie name.
'value' (type: string) -> Cookie value.
Optional arguments:
'url' (type: string) -> The request-URI to associate with the setting of the cookie. This value can affect the default domain and path values of the created cookie.
'domain' (type: string) -> Cookie domain.
'path' (type: string) -> Cookie path.
'secure' (type: boolean) -> True if cookie is secure.
'httpOnly' (type: boolean) -> True if cookie is http-only.
'sameSite' (type: CookieSameSite) -> Cookie SameSite type.
'expires' (type: TimeSinceEpoch) -> Cookie expiration date, session cookie if not set
Returns:
'success' (type: boolean) -> True if successfully set cookie.
Description: Sets a cookie with the given cookie data; may overwrite equivalent cookies if they exist.
### Response:
def Network_setCookie(self, name, value, **kwargs):
    """
    Issue the experimental DevTools protocol command ``Network.setCookie``,
    which sets a cookie with the given data and may overwrite equivalent
    cookies if they already exist.

    Required arguments:
        name (string): Cookie name.
        value (string): Cookie value.
    Optional keyword arguments:
        url (string): Request-URI to associate with the setting of the cookie;
            can affect the default domain and path values of the created cookie.
        domain (string): Cookie domain.
        path (string): Cookie path.
        secure (boolean): True if cookie is secure.
        httpOnly (boolean): True if cookie is http-only.
        sameSite (CookieSameSite): Cookie SameSite type.
        expires (TimeSinceEpoch): Cookie expiration date; session cookie if
            not set.

    Returns the protocol response, which carries 'success' (boolean): True if
    the cookie was successfully set.
    """
    # Both required arguments must be strings.
    for label, arg in (('name', name), ('value', value)):
        assert isinstance(arg, (str,)
            ), "Argument '%s' must be of type '['str']'. Received type: '%s'" % (
            label, type(arg))
    # Optional arguments are type-checked only when present; 'sameSite' and
    # 'expires' use protocol-declared types and are passed through unchecked.
    for key, required_type in (('url', str), ('domain', str), ('path', str),
                               ('secure', bool), ('httpOnly', bool)):
        if key in kwargs:
            assert isinstance(kwargs[key], (required_type,)
                ), ("Optional argument '%s' must be of type '['%s']'. "
                    "Received type: '%s'") % (key, required_type.__name__,
                                              type(kwargs[key]))
    # Any keyword argument outside the protocol's declared set is rejected.
    expected = ['url', 'domain', 'path', 'secure', 'httpOnly', 'sameSite',
        'expires']
    passed_keys = list(kwargs.keys())
    assert all(key in expected for key in passed_keys
        ), "Allowed kwargs are ['url', 'domain', 'path', 'secure', 'httpOnly', 'sameSite', 'expires']. Passed kwargs: %s" % passed_keys
    # Send the command synchronously and hand back the response object.
    return self.synchronous_command('Network.setCookie', name=name,
        value=value, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.