| code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
|---|---|---|
def trajectory_SgConst(Sg=0.1, delta_logt_dex=-0.01):
'''
Set up trajectories for constant radiation entropy,
S_gamma/R, where the radiation constant R = N_A*k
(Dave Arnett, Supernova book, p. 212).
This relates rho and T, but the time scale is independent.
Parameters
----------
Sg : float
S_gamma/R; values between 0.1 and 10 reflect conditions in
massive stars. The default is 0.1.
delta_logt_dex : float
Sets interval between time steps in dex of logtimerev. The
default is -0.01.
'''
# reverse logarithmic time
logtimerev=np.arange(5.,-6.,delta_logt_dex)
logrho=np.linspace(0,8.5,len(logtimerev))
logT = (old_div(1.,3.))*(logrho + 21.9161 + np.log10(Sg))
#rho_6=10**logrho/(0.1213*1.e6)
#T9=rho_6**(1./3.)
#logT_T3=np.log10(T9*1.e9)
pl.close(3);pl.figure(3);pl.plot(logrho,logT,label='$S/\mathrm{N_Ak}='+str(Sg)+'$')
pl.legend(loc=2);pl.xlabel('$\log \\rho$'); pl.ylabel('$\log T$')
pl.close(5);pl.figure(5);pl.plot(logtimerev, logrho)
pl.xlabel('$\log (t_\mathrm{final}-t)$'); pl.ylabel('$\log \\rho$')
pl.xlim(8,-6)
pl.close(6);pl.figure(6);pl.plot(logtimerev)
pl.ylabel('$\log (t_\mathrm{final}-t)$'); pl.xlabel('cycle')
# [t] logtimerev yrs
# [rho] cgs
# [T] K
T9=old_div(10**logT,1.e9)
data=[logtimerev,T9,logrho]
att.writeTraj(filename='trajectory.input', data=data, ageunit=2, tunit=1, rhounit=1, idNum=1) | Set up trajectories for constant radiation entropy,
S_gamma/R, where the radiation constant R = N_A*k
(Dave Arnett, Supernova book, p. 212).
This relates rho and T, but the time scale is independent.
Parameters
----------
Sg : float
S_gamma/R; values between 0.1 and 10 reflect conditions in
massive stars. The default is 0.1.
delta_logt_dex : float
Sets interval between time steps in dex of logtimerev. The
default is -0.01. | Below is the instruction that describes the task:
### Input:
Set up trajectories for constant radiation entropy,
S_gamma/R, where the radiation constant R = N_A*k
(Dave Arnett, Supernova book, p. 212).
This relates rho and T, but the time scale is independent.
Parameters
----------
Sg : float
S_gamma/R; values between 0.1 and 10 reflect conditions in
massive stars. The default is 0.1.
delta_logt_dex : float
Sets interval between time steps in dex of logtimerev. The
default is -0.01.
### Response:
def trajectory_SgConst(Sg=0.1, delta_logt_dex=-0.01):
'''
Set up trajectories for constant radiation entropy,
S_gamma/R, where the radiation constant R = N_A*k
(Dave Arnett, Supernova book, p. 212).
This relates rho and T, but the time scale is independent.
Parameters
----------
Sg : float
S_gamma/R; values between 0.1 and 10 reflect conditions in
massive stars. The default is 0.1.
delta_logt_dex : float
Sets interval between time steps in dex of logtimerev. The
default is -0.01.
'''
# reverse logarithmic time
logtimerev=np.arange(5.,-6.,delta_logt_dex)
logrho=np.linspace(0,8.5,len(logtimerev))
logT = (old_div(1.,3.))*(logrho + 21.9161 + np.log10(Sg))
#rho_6=10**logrho/(0.1213*1.e6)
#T9=rho_6**(1./3.)
#logT_T3=np.log10(T9*1.e9)
pl.close(3);pl.figure(3);pl.plot(logrho,logT,label='$S/\mathrm{N_Ak}='+str(Sg)+'$')
pl.legend(loc=2);pl.xlabel('$\log \\rho$'); pl.ylabel('$\log T$')
pl.close(5);pl.figure(5);pl.plot(logtimerev, logrho)
pl.xlabel('$\log (t_\mathrm{final}-t)$'); pl.ylabel('$\log \\rho$')
pl.xlim(8,-6)
pl.close(6);pl.figure(6);pl.plot(logtimerev)
pl.ylabel('$\log (t_\mathrm{final}-t)$'); pl.xlabel('cycle')
# [t] logtimerev yrs
# [rho] cgs
# [T] K
T9=old_div(10**logT,1.e9)
data=[logtimerev,T9,logrho]
att.writeTraj(filename='trajectory.input', data=data, ageunit=2, tunit=1, rhounit=1, idNum=1) |
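A note on the hard-coded 21.9161 in the `logT` expression above: assuming the standard radiation-entropy relation S_gamma/(N_A*k) = 4*a*T^3/(3*rho*R) in CGS units (my reading of the Arnett reference, not stated explicitly in the row), the constant is log10(3R/4a). A minimal check:

```python
import numpy as np

# Sketch under the assumption above: T**3 = (3*R/(4*a)) * rho * Sg, so
# logT = (1/3) * (logrho + log10(3*R/(4*a)) + log10(Sg)).
a = 7.5657e-15  # radiation density constant [erg cm^-3 K^-4]
R = 8.314e7     # gas constant N_A*k [erg g^-1 K^-1]

print(np.log10(3.0 * R / (4.0 * a)))  # ~21.916, matching the hard-coded 21.9161
```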
def raw_writer(indent=None):
"""Returns a raw text writer co-routine.
Yields:
DataEvent: serialization events to write out
Receives :class:`amazon.ion.core.IonEvent` or ``None`` when the co-routine yields
``HAS_PENDING`` :class:`WriteEventType` events.
"""
is_whitespace_str = isinstance(indent, str) and re.search(r'\A\s*\Z', indent, re.M) is not None
if not (indent is None or is_whitespace_str):
raise ValueError('The indent parameter must either be None or a string containing only whitespace')
indent_bytes = six.b(indent) if isinstance(indent, str) else indent
return writer_trampoline(_raw_writer_coroutine(indent=indent_bytes)) | Returns a raw text writer co-routine.
Yields:
DataEvent: serialization events to write out
Receives :class:`amazon.ion.core.IonEvent` or ``None`` when the co-routine yields
``HAS_PENDING`` :class:`WriteEventType` events. | Below is the instruction that describes the task:
### Input:
Returns a raw text writer co-routine.
Yields:
DataEvent: serialization events to write out
Receives :class:`amazon.ion.core.IonEvent` or ``None`` when the co-routine yields
``HAS_PENDING`` :class:`WriteEventType` events.
### Response:
def raw_writer(indent=None):
"""Returns a raw text writer co-routine.
Yields:
DataEvent: serialization events to write out
Receives :class:`amazon.ion.core.IonEvent` or ``None`` when the co-routine yields
``HAS_PENDING`` :class:`WriteEventType` events.
"""
is_whitespace_str = isinstance(indent, str) and re.search(r'\A\s*\Z', indent, re.M) is not None
if not (indent is None or is_whitespace_str):
raise ValueError('The indent parameter must either be None or a string containing only whitespace')
indent_bytes = six.b(indent) if isinstance(indent, str) else indent
return writer_trampoline(_raw_writer_coroutine(indent=indent_bytes)) |
def parse_session_cookie(cookie_to_cook):
""" cookie_to_cook = http_header['cookie']
"""
#print("cookie_to_cook: %s"%str(cookie_to_cook))
session_value = None
tokens = cookie_to_cook.split(";")
for tok in tokens:
if 'remi_session=' in tok:
#print("found session id: %s"%str(tok))
try:
session_value = int(tok.replace('remi_session=', ''))
except ValueError:
pass
return session_value | cookie_to_cook = http_header['cookie'] | Below is the instruction that describes the task:
### Input:
cookie_to_cook = http_header['cookie']
### Response:
def parse_session_cookie(cookie_to_cook):
""" cookie_to_cook = http_header['cookie']
"""
#print("cookie_to_cook: %s"%str(cookie_to_cook))
session_value = None
tokens = cookie_to_cook.split(";")
for tok in tokens:
if 'remi_session=' in tok:
#print("found session id: %s"%str(tok))
try:
session_value = int(tok.replace('remi_session=', ''))
except ValueError:
pass
return session_value |
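A minimal usage sketch for `parse_session_cookie`; the header value is made up for illustration:

```python
# Hypothetical Cookie header value: only the remi_session token is used.
header_cookie = "theme=dark; remi_session=1234567890; lang=en"
assert parse_session_cookie(header_cookie) == 1234567890

# A non-numeric id is swallowed by the except clause and yields None.
assert parse_session_cookie("remi_session=not-a-number") is None
```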
def writeToProto(self, proto):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.
Write state to proto object.
:param proto: TMRegionProto capnproto object
"""
proto.temporalImp = self.temporalImp
proto.columnCount = self.columnCount
proto.inputWidth = self.inputWidth
proto.cellsPerColumn = self.cellsPerColumn
proto.learningMode = self.learningMode
proto.inferenceMode = self.inferenceMode
proto.anomalyMode = self.anomalyMode
proto.topDownMode = self.topDownMode
proto.computePredictedActiveCellIndices = (
self.computePredictedActiveCellIndices)
proto.orColumnOutputs = self.orColumnOutputs
if self.temporalImp == "py":
tmProto = proto.init("backtrackingTM")
elif self.temporalImp == "cpp":
tmProto = proto.init("backtrackingTMCpp")
elif self.temporalImp == "tm_py":
tmProto = proto.init("temporalMemory")
elif self.temporalImp == "tm_cpp":
tmProto = proto.init("temporalMemory")
else:
raise TypeError(
"Unsupported temporalImp for capnp serialization: {}".format(
self.temporalImp))
self._tfdr.write(tmProto) | Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.
Write state to proto object.
:param proto: TMRegionProto capnproto object | Below is the instruction that describes the task:
### Input:
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.
Write state to proto object.
:param proto: TMRegionProto capnproto object
### Response:
def writeToProto(self, proto):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.
Write state to proto object.
:param proto: TMRegionProto capnproto object
"""
proto.temporalImp = self.temporalImp
proto.columnCount = self.columnCount
proto.inputWidth = self.inputWidth
proto.cellsPerColumn = self.cellsPerColumn
proto.learningMode = self.learningMode
proto.inferenceMode = self.inferenceMode
proto.anomalyMode = self.anomalyMode
proto.topDownMode = self.topDownMode
proto.computePredictedActiveCellIndices = (
self.computePredictedActiveCellIndices)
proto.orColumnOutputs = self.orColumnOutputs
if self.temporalImp == "py":
tmProto = proto.init("backtrackingTM")
elif self.temporalImp == "cpp":
tmProto = proto.init("backtrackingTMCpp")
elif self.temporalImp == "tm_py":
tmProto = proto.init("temporalMemory")
elif self.temporalImp == "tm_cpp":
tmProto = proto.init("temporalMemory")
else:
raise TypeError(
"Unsupported temporalImp for capnp serialization: {}".format(
self.temporalImp))
self._tfdr.write(tmProto) |
def create_token(self, user_id, permission_obj):
""" 'permission_obj' param should be a string.
e.g. '[{"access":"d_u_list","oid":{"id":"1576946496","type":"Domain"}}]'
http://docs.exosite.com/portals/#add-user-permission
"""
headers = {
'User-Agent': self.user_agent(),
'Content-Type': self.content_type()
}
headers.update(self.headers())
url = self.portals_url()+'/users/{0}/permissions'.format(user_id)
# print("URL: {0}".format(url))
r = requests.post( url,
data=permission_obj,
headers=headers,
auth=self.auth())
if HTTP_STATUS.OK == r.status_code:
return r.json()
else:
print("create_token: Something went wrong: <{0}>: {1}".format(
r.status_code, r.reason))
r.raise_for_status() | 'permission_obj' param should be a string.
e.g. '[{"access":"d_u_list","oid":{"id":"1576946496","type":"Domain"}}]'
http://docs.exosite.com/portals/#add-user-permission | Below is the instruction that describes the task:
### Input:
'permission_obj' param should be a string.
e.g. '[{"access":"d_u_list","oid":{"id":"1576946496","type":"Domain"}}]'
http://docs.exosite.com/portals/#add-user-permission
### Response:
def create_token(self, user_id, permission_obj):
""" 'permission_obj' param should be a string.
e.g. '[{"access":"d_u_list","oid":{"id":"1576946496","type":"Domain"}}]'
http://docs.exosite.com/portals/#add-user-permission
"""
headers = {
'User-Agent': self.user_agent(),
'Content-Type': self.content_type()
}
headers.update(self.headers())
url = self.portals_url()+'/users/{0}/permissions'.format(user_id)
# print("URL: {0}".format(url))
r = requests.post( url,
data=permission_obj,
headers=headers,
auth=self.auth())
if HTTP_STATUS.OK == r.status_code:
return r.json()
else:
print("create_token: Something went wrong: <{0}>: {1}".format(
r.status_code, r.reason))
r.raise_for_status() |
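The `permission_obj` string from the docstring can be built with `json.dumps` rather than written by hand; a sketch reusing the example ids shown above (the `portals` instance and user id are hypothetical):

```python
import json

permission_obj = json.dumps(
    [{"access": "d_u_list", "oid": {"id": "1576946496", "type": "Domain"}}]
)
# token = portals.create_token("some-user-id", permission_obj)  # hypothetical names
```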
def output_data_ports(self, output_data_ports):
""" Setter for _output_data_ports field
See property
:param dict output_data_ports: Dictionary that maps :class:`int` data_port_ids onto values of type
:class:`rafcon.core.state_elements.data_port.OutputDataPort`
:raises exceptions.TypeError: if the output_data_ports parameter has the wrong type
:raises exceptions.AttributeError: if the key of the output dictionary and the id of the data port do not match
"""
if not isinstance(output_data_ports, dict):
raise TypeError("output_data_ports must be of type dict")
if any(port_id != port.data_port_id for port_id, port in output_data_ports.items()):
raise AttributeError("The key of the output dictionary and the id of the data port do not match")
# This is a fix for older state machines, which didn't distinguish between input and output ports
for port_id, port in output_data_ports.items():
if not isinstance(port, OutputDataPort):
if isinstance(port, DataPort):
port = OutputDataPort(port.name, port.data_type, port.default_value, port.data_port_id)
output_data_ports[port_id] = port
else:
raise TypeError("Elements of output_data_ports must be of type OutputDataPort, given: {0}".format(
type(port).__name__))
old_output_data_ports = self._output_data_ports
self._output_data_ports = output_data_ports
for port_id, port in output_data_ports.items():
try:
port.parent = self
except ValueError:
self._output_data_ports = old_output_data_ports
raise
# check that none of the old_output_data_ports still reference self as their parent
for old_output_data_port in old_output_data_ports.values():
if old_output_data_port not in self._output_data_ports.values() and old_output_data_port.parent is self:
old_output_data_port.parent = None | Setter for _output_data_ports field
See property
:param dict output_data_ports: Dictionary that maps :class:`int` data_port_ids onto values of type
:class:`rafcon.core.state_elements.data_port.OutputDataPort`
:raises exceptions.TypeError: if the output_data_ports parameter has the wrong type
:raises exceptions.AttributeError: if the key of the output dictionary and the id of the data port do not match | Below is the instruction that describes the task:
### Input:
Setter for _output_data_ports field
See property
:param dict output_data_ports: Dictionary that maps :class:`int` data_port_ids onto values of type
:class:`rafcon.core.state_elements.data_port.OutputDataPort`
:raises exceptions.TypeError: if the output_data_ports parameter has the wrong type
:raises exceptions.AttributeError: if the key of the output dictionary and the id of the data port do not match
### Response:
def output_data_ports(self, output_data_ports):
""" Setter for _output_data_ports field
See property
:param dict output_data_ports: Dictionary that maps :class:`int` data_port_ids onto values of type
:class:`rafcon.core.state_elements.data_port.OutputDataPort`
:raises exceptions.TypeError: if the output_data_ports parameter has the wrong type
:raises exceptions.AttributeError: if the key of the output dictionary and the id of the data port do not match
"""
if not isinstance(output_data_ports, dict):
raise TypeError("output_data_ports must be of type dict")
if any(port_id != port.data_port_id for port_id, port in output_data_ports.items()):
raise AttributeError("The key of the output dictionary and the id of the data port do not match")
# This is a fix for older state machines, which didn't distinguish between input and output ports
for port_id, port in output_data_ports.items():
if not isinstance(port, OutputDataPort):
if isinstance(port, DataPort):
port = OutputDataPort(port.name, port.data_type, port.default_value, port.data_port_id)
output_data_ports[port_id] = port
else:
raise TypeError("Elements of output_data_ports must be of type OutputDataPort, given: {0}".format(
type(port).__name__))
old_output_data_ports = self._output_data_ports
self._output_data_ports = output_data_ports
for port_id, port in output_data_ports.items():
try:
port.parent = self
except ValueError:
self._output_data_ports = old_output_data_ports
raise
# check that none of the old_output_data_ports still reference self as their parent
for old_output_data_port in old_output_data_ports.values():
if old_output_data_port not in self._output_data_ports.values() and old_output_data_port.parent is self:
old_output_data_port.parent = None |
def bind(_self, **kwargs):
"""Bind attributes to the ``extra`` dict of each logged message record.
This is used to add custom context to each logging call.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the ``extra`` dict.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but which sends record with the customized ``extra``
dict.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
1
>>> class Server:
... def __init__(self, ip):
... self.ip = ip
... self.logger = logger.bind(ip=ip)
... def call(self, message):
... self.logger.info(message)
...
>>> instance_1 = Server("192.168.0.200")
>>> instance_2 = Server("127.0.0.1")
>>> instance_1.call("First instance")
192.168.0.200 - First instance
>>> instance_2.call("Second instance")
127.0.0.1 - Second instance
"""
return Logger(
{**_self._extra, **kwargs},
_self._exception,
_self._record,
_self._lazy,
_self._ansi,
_self._raw,
_self._depth,
) | Bind attributes to the ``extra`` dict of each logged message record.
This is used to add custom context to each logging call.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the ``extra`` dict.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but which sends record with the customized ``extra``
dict.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
1
>>> class Server:
... def __init__(self, ip):
... self.ip = ip
... self.logger = logger.bind(ip=ip)
... def call(self, message):
... self.logger.info(message)
...
>>> instance_1 = Server("192.168.0.200")
>>> instance_2 = Server("127.0.0.1")
>>> instance_1.call("First instance")
192.168.0.200 - First instance
>>> instance_2.call("Second instance")
127.0.0.1 - Second instance | Below is the instruction that describes the task:
### Input:
Bind attributes to the ``extra`` dict of each logged message record.
This is used to add custom context to each logging call.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the ``extra`` dict.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but which sends record with the customized ``extra``
dict.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
1
>>> class Server:
... def __init__(self, ip):
... self.ip = ip
... self.logger = logger.bind(ip=ip)
... def call(self, message):
... self.logger.info(message)
...
>>> instance_1 = Server("192.168.0.200")
>>> instance_2 = Server("127.0.0.1")
>>> instance_1.call("First instance")
192.168.0.200 - First instance
>>> instance_2.call("Second instance")
127.0.0.1 - Second instance
### Response:
def bind(_self, **kwargs):
"""Bind attributes to the ``extra`` dict of each logged message record.
This is used to add custom context to each logging call.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the ``extra`` dict.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but which sends record with the customized ``extra``
dict.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
1
>>> class Server:
... def __init__(self, ip):
... self.ip = ip
... self.logger = logger.bind(ip=ip)
... def call(self, message):
... self.logger.info(message)
...
>>> instance_1 = Server("192.168.0.200")
>>> instance_2 = Server("127.0.0.1")
>>> instance_1.call("First instance")
192.168.0.200 - First instance
>>> instance_2.call("Second instance")
127.0.0.1 - Second instance
"""
return Logger(
{**_self._extra, **kwargs},
_self._exception,
_self._record,
_self._lazy,
_self._ansi,
_self._raw,
_self._depth,
) |
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = utils.expand_modules(
modules, self.config.black_list, self.config.black_list_re
)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "fatal":
message = str(error["ex"]).replace(os.getcwd() + os.sep, "")
self.add_message(key, args=message)
return result | get modules and errors from a list of modules and handle errors | Below is the instruction that describes the task:
### Input:
get modules and errors from a list of modules and handle errors
### Response:
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = utils.expand_modules(
modules, self.config.black_list, self.config.black_list_re
)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "fatal":
message = str(error["ex"]).replace(os.getcwd() + os.sep, "")
self.add_message(key, args=message)
return result |
def extract_lzh (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract a LZH archive."""
opts = 'x'
if verbosity > 1:
opts += 'v'
opts += "w=%s" % outdir
return [cmd, opts, archive] | Extract a LZH archive. | Below is the instruction that describes the task:
### Input:
Extract a LZH archive.
### Response:
def extract_lzh (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract a LZH archive."""
opts = 'x'
if verbosity > 1:
opts += 'v'
opts += "w=%s" % outdir
return [cmd, opts, archive] |
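The return value is the argv list for the external `lha`-style tool rather than the extraction itself; a quick check of the option string it builds (archive name and output directory are illustrative):

```python
cmd = extract_lzh('backup.lzh', compression=None, cmd='lha',
                  verbosity=2, interactive=False, outdir='/tmp/out')
assert cmd == ['lha', 'xvw=/tmp/out', 'backup.lzh']
```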
def delete_by_id(
self, application_definition_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the managed application definition.
:param application_definition_id: The fully qualified ID of the
managed application definition, including the managed application name
and the managed application definition resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}
:type application_definition_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>`
"""
raw_result = self._delete_by_id_initial(
application_definition_id=application_definition_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | Deletes the managed application definition.
:param application_definition_id: The fully qualified ID of the
managed application definition, including the managed application name
and the managed application definition resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}
:type application_definition_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>` | Below is the instruction that describes the task:
### Input:
Deletes the managed application definition.
:param application_definition_id: The fully qualified ID of the
managed application definition, including the managed application name
and the managed application definition resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}
:type application_definition_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>`
### Response:
def delete_by_id(
self, application_definition_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the managed application definition.
:param application_definition_id: The fully qualified ID of the
managed application definition, including the managed application name
and the managed application definition resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name}
:type application_definition_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>`
"""
raw_result = self._delete_by_id_initial(
application_definition_id=application_definition_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
def attach_keypress(fig, scaling=1.1):
"""
Attach a key press event handler that configures keys for closing a
figure and changing the figure size. Keys 'e' and 'c' respectively
expand and contract the figure, and key 'q' closes it.
**Note:** Resizing may not function correctly with all matplotlib
backends (a
`bug <https://github.com/matplotlib/matplotlib/issues/10083>`__
has been reported).
Parameters
----------
fig : :class:`matplotlib.figure.Figure` object
Figure to which event handling is to be attached
scaling : float, optional (default 1.1)
Scaling factor for figure size changes
Returns
-------
press : function
Key press event handler function
"""
def press(event):
if event.key == 'q':
plt.close(fig)
elif event.key == 'e':
fig.set_size_inches(scaling * fig.get_size_inches(), forward=True)
elif event.key == 'c':
fig.set_size_inches(fig.get_size_inches() / scaling, forward=True)
# Avoid multiple event handlers attached to the same figure
if not hasattr(fig, '_sporco_keypress_cid'):
cid = fig.canvas.mpl_connect('key_press_event', press)
fig._sporco_keypress_cid = cid
return press | Attach a key press event handler that configures keys for closing a
figure and changing the figure size. Keys 'e' and 'c' respectively
expand and contract the figure, and key 'q' closes it.
**Note:** Resizing may not function correctly with all matplotlib
backends (a
`bug <https://github.com/matplotlib/matplotlib/issues/10083>`__
has been reported).
Parameters
----------
fig : :class:`matplotlib.figure.Figure` object
Figure to which event handling is to be attached
scaling : float, optional (default 1.1)
Scaling factor for figure size changes
Returns
-------
press : function
Key press event handler function | Below is the instruction that describes the task:
### Input:
Attach a key press event handler that configures keys for closing a
figure and changing the figure size. Keys 'e' and 'c' respectively
expand and contract the figure, and key 'q' closes it.
**Note:** Resizing may not function correctly with all matplotlib
backends (a
`bug <https://github.com/matplotlib/matplotlib/issues/10083>`__
has been reported).
Parameters
----------
fig : :class:`matplotlib.figure.Figure` object
Figure to which event handling is to be attached
scaling : float, optional (default 1.1)
Scaling factor for figure size changes
Returns
-------
press : function
Key press event handler function
### Response:
def attach_keypress(fig, scaling=1.1):
"""
Attach a key press event handler that configures keys for closing a
figure and changing the figure size. Keys 'e' and 'c' respectively
expand and contract the figure, and key 'q' closes it.
**Note:** Resizing may not function correctly with all matplotlib
backends (a
`bug <https://github.com/matplotlib/matplotlib/issues/10083>`__
has been reported).
Parameters
----------
fig : :class:`matplotlib.figure.Figure` object
Figure to which event handling is to be attached
scaling : float, optional (default 1.1)
Scaling factor for figure size changes
Returns
-------
press : function
Key press event handler function
"""
def press(event):
if event.key == 'q':
plt.close(fig)
elif event.key == 'e':
fig.set_size_inches(scaling * fig.get_size_inches(), forward=True)
elif event.key == 'c':
fig.set_size_inches(fig.get_size_inches() / scaling, forward=True)
# Avoid multiple event handlers attached to the same figure
if not hasattr(fig, '_sporco_keypress_cid'):
cid = fig.canvas.mpl_connect('key_press_event', press)
fig._sporco_keypress_cid = cid
return press |
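A minimal usage sketch for `attach_keypress`; nothing beyond matplotlib and numpy is assumed:

```python
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
plt.plot(np.sin(np.linspace(0.0, 10.0, 200)))
attach_keypress(fig, scaling=1.2)  # 'e' expands, 'c' contracts, 'q' closes
plt.show()
```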
def _matplotlib_circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
plot_barriers=True,
reverse_bits=False,
justify=None):
"""Draw a quantum circuit based on matplotlib.
If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a circuit inline.
We recommend `%config InlineBackend.figure_format = 'svg'` for the inline visualization.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
Returns:
matplotlib.figure: a matplotlib figure object for the circuit diagram
"""
qregs, cregs, ops = utils._get_layered_instructions(circuit,
reverse_bits=reverse_bits,
justify=justify)
qcd = _matplotlib.MatplotlibDrawer(qregs, cregs, ops, scale=scale, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits)
return qcd.draw(filename) | Draw a quantum circuit based on matplotlib.
If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a circuit inline.
We recommend `%config InlineBackend.figure_format = 'svg'` for the inline visualization.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
Returns:
matplotlib.figure: a matplotlib figure object for the circuit diagram | Below is the instruction that describes the task:
### Input:
Draw a quantum circuit based on matplotlib.
If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a circuit inline.
We recommend `%config InlineBackend.figure_format = 'svg'` for the inline visualization.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
Returns:
matplotlib.figure: a matplotlib figure object for the circuit diagram
### Response:
def _matplotlib_circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
plot_barriers=True,
reverse_bits=False,
justify=None):
"""Draw a quantum circuit based on matplotlib.
If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a circuit inline.
We recommend `%config InlineBackend.figure_format = 'svg'` for the inline visualization.
Args:
circuit (QuantumCircuit): a quantum circuit
scale (float): scaling factor
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
the circuit should be justified.
Returns:
matplotlib.figure: a matplotlib figure object for the circuit diagram
"""
qregs, cregs, ops = utils._get_layered_instructions(circuit,
reverse_bits=reverse_bits,
justify=justify)
qcd = _matplotlib.MatplotlibDrawer(qregs, cregs, ops, scale=scale, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits)
return qcd.draw(filename) |
def replace_runtime_class(self, name, body, **kwargs):
"""
replace the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_runtime_class_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_runtime_class_with_http_info(name, body, **kwargs)
return data | replace the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
replace the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_runtime_class(self, name, body, **kwargs):
"""
replace the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_runtime_class_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_runtime_class_with_http_info(name, body, **kwargs)
return data |
def normalize(numbers, total=1.0):  # from AI: A Modern Approach
"""Multiply each number by a constant such that the sum is 1.0 (or total).
>>> normalize([1,2,1])
[0.25, 0.5, 0.25]
"""
k = total / sum(numbers)
return [k * n for n in numbers] | Multiply each number by a constant such that the sum is 1.0 (or total).
>>> normalize([1,2,1])
[0.25, 0.5, 0.25] | Below is the instruction that describes the task:
### Input:
Multiply each number by a constant such that the sum is 1.0 (or total).
>>> normalize([1,2,1])
[0.25, 0.5, 0.25]
### Response:
def normalize(numbers, total=1.0):  # from AI: A Modern Approach
"""Multiply each number by a constant such that the sum is 1.0 (or total).
>>> normalize([1,2,1])
[0.25, 0.5, 0.25]
"""
k = total / sum(numbers)
return [k * n for n in numbers] |
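The `total` parameter scales to any target sum, and a zero-sum input raises ZeroDivisionError since `k` divides by `sum(numbers)`; one more worked example:

```python
>>> normalize([1, 2, 1], total=2.0)   # k = 2.0 / 4 = 0.5
[0.5, 1.0, 0.5]
```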
def _generate_provenance(self):
"""Function to generate provenance at the end of the IF."""
# noinspection PyTypeChecker
hazard = definition(
self._provenance['hazard_keywords']['hazard'])
exposures = [
definition(layer.keywords['exposure']) for layer in self.exposures
]
# InaSAFE
set_provenance(
self._provenance, provenance_impact_function_name, self.name)
set_provenance(
self._provenance,
provenance_analysis_extent,
self._analysis_extent.asWkt())
set_provenance(
self._provenance,
provenance_analysis_question,
get_multi_exposure_analysis_question(hazard, exposures))
set_provenance(
self._provenance,
provenance_data_store_uri,
self.datastore.uri_path)
# Map title
set_provenance(self._provenance, provenance_map_title, self.name)
# CRS
set_provenance(
self._provenance, provenance_crs, self._crs.authid())
# Debug mode
set_provenance(
self._provenance, provenance_debug_mode, self.debug_mode)
self._provenance_ready = True | Function to generate provenance at the end of the IF. | Below is the instruction that describes the task:
### Input:
Function to generate provenance at the end of the IF.
### Response:
def _generate_provenance(self):
"""Function to generate provenance at the end of the IF."""
# noinspection PyTypeChecker
hazard = definition(
self._provenance['hazard_keywords']['hazard'])
exposures = [
definition(layer.keywords['exposure']) for layer in self.exposures
]
# InaSAFE
set_provenance(
self._provenance, provenance_impact_function_name, self.name)
set_provenance(
self._provenance,
provenance_analysis_extent,
self._analysis_extent.asWkt())
set_provenance(
self._provenance,
provenance_analysis_question,
get_multi_exposure_analysis_question(hazard, exposures))
set_provenance(
self._provenance,
provenance_data_store_uri,
self.datastore.uri_path)
# Map title
set_provenance(self._provenance, provenance_map_title, self.name)
# CRS
set_provenance(
self._provenance, provenance_crs, self._crs.authid())
# Debug mode
set_provenance(
self._provenance, provenance_debug_mode, self.debug_mode)
self._provenance_ready = True |
def run_expect_command(self, cmd, expect_end=None, timeout=3, wait_seconds=2):
"""
Execute a shell command and return its output.
:param timeout:
:param wait_seconds:
:param cmd:
:param expect_end:
:return:
"""
shell = self.ssh_session.invoke_shell()
last_time = int(time.time())
if not cmd.endswith('\n'):
cmd += '\n'
def receive():
buff = ''
if expect_end is None:
buff = shell.recv(9999)
else:
while not buff.endswith(expect_end):
resp = shell.recv(9999)
buff += force_text(resp)
now = int(time.time())
if now - last_time > timeout:
break
buff = force_text(buff)
logger.info(buff)
return buff
logger.info(cmd)
shell.send(cmd)
time.sleep(wait_seconds)
return receive() | Execute a shell command and return its output.
:param timeout:
:param wait_seconds:
:param cmd:
:param expect_end:
:return: | Below is the instruction that describes the task:
### Input:
Execute a shell command and return its output.
:param timeout:
:param wait_seconds:
:param cmd:
:param expect_end:
:return:
### Response:
def run_expect_command(self, cmd, expect_end=None, timeout=3, wait_seconds=2):
"""
Execute a shell command and return its output.
:param timeout:
:param wait_seconds:
:param cmd:
:param expect_end:
:return:
"""
shell = self.ssh_session.invoke_shell()
last_time = int(time.time())
if not cmd.endswith('\n'):
cmd += '\n'
def receive():
buff = ''
if expect_end is None:
buff = shell.recv(9999)
else:
while not buff.endswith(expect_end):
resp = shell.recv(9999)
buff += force_text(resp)
now = int(time.time())
if now - last_time > timeout:
break
buff = force_text(buff)
logger.info(buff)
return buff
logger.info(cmd)
shell.send(cmd)
time.sleep(wait_seconds)
return receive() |
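A hedged usage sketch, assuming `client` is an object that holds an established paramiko SSHClient in `self.ssh_session`, as the method above expects:

```python
# Hypothetical call: waits up to 5s for output ending in '$ ' and
# returns everything the remote shell printed.
output = client.run_expect_command('uname -a', expect_end='$ ', timeout=5)
print(output)
```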
def baseline_or_audit(self, allow_deletion=False, audit_only=False):
"""Baseline synchronization or audit.
Both functions are implemented in this routine because audit is a prerequisite
for a baseline sync. In the case of baseline sync the last timestamp seen
is recorded as client state.
"""
action = ('audit' if (audit_only) else 'baseline sync')
self.logger.debug("Starting " + action)
# 0. Sanity checks
if (len(self.mapper) < 1):
raise ClientFatalError(
"No source to destination mapping specified")
if (not audit_only and self.mapper.unsafe()):
raise ClientFatalError(
"Source to destination mappings unsafe: %s" %
str(self.mapper))
# 1. Get inventories from both src and dst
# 1.a source resource list
src_resource_list = self.find_resource_list()
self.logger.info(
"Read source resource list, %d resources listed" %
(len(src_resource_list)))
if (len(src_resource_list) == 0):
raise ClientFatalError(
"Aborting as there are no resources to sync")
if (len(self.hashes) > 0):
self.prune_hashes(src_resource_list.hashes(), 'resource')
# 1.b destination resource list mapped back to source URIs
rlb = ResourceListBuilder(set_hashes=self.hashes, mapper=self.mapper)
dst_resource_list = rlb.from_disk()
# 2. Compare these resource lists respecting any comparison options
(same, updated, deleted, created) = dst_resource_list.compare(src_resource_list)
# 3. Report status and planned actions
self.log_status(in_sync=(len(updated) + len(deleted) + len(created) == 0),
audit=True, same=len(same), created=len(created),
updated=len(updated), deleted=len(deleted))
if (audit_only or len(created) + len(updated) + len(deleted) == 0):
self.logger.debug("Completed " + action)
return
# 4. Check that sitemap has authority over URIs listed
if (not self.noauth):
uauth = UrlAuthority(self.sitemap, strict=self.strictauth)
for resource in src_resource_list:
if (not uauth.has_authority_over(resource.uri)):
raise ClientFatalError(
"Aborting as sitemap (%s) mentions resource at a location it does not have authority over (%s), override with --noauth" %
(self.sitemap, resource.uri))
# 5. Grab files to do sync
delete_msg = (
", and delete %d resources" %
len(deleted)) if (allow_deletion) else ''
self.logger.warning(
"Will GET %d resources%s" %
(len(created) + len(updated), delete_msg))
self.last_timestamp = 0
num_created = 0
num_updated = 0
num_deleted = 0
for resource in created:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
self.logger.info("created: %s -> %s" % (uri, filename))
num_created += self.update_resource(resource, filename, 'created')
for resource in updated:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
self.logger.info("updated: %s -> %s" % (uri, filename))
num_updated += self.update_resource(resource, filename, 'updated')
for resource in deleted:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
num_deleted += self.delete_resource(resource,
filename, allow_deletion)
# 6. Store last timestamp to allow incremental sync
if (not audit_only and self.last_timestamp > 0):
ClientState().set_state(self.sitemap, self.last_timestamp)
self.logger.info(
"Written last timestamp %s for incremental sync" %
(datetime_to_str(
self.last_timestamp)))
# 7. Done
self.log_status(in_sync=(len(updated) + len(deleted) + len(created) == 0),
same=len(same), created=num_created,
updated=num_updated, deleted=num_deleted, to_delete=len(deleted))
self.logger.debug("Completed %s" % (action)) | Baseline synchronization or audit.
Both functions are implemented in this routine because audit is a prerequisite
for a baseline sync. In the case of baseline sync the last timestamp seen
is recorded as client state. | Below is the instruction that describes the task:
### Input:
Baseline synchronization or audit.
Both functions are implemented in this routine because audit is a prerequisite
for a baseline sync. In the case of baseline sync the last timestamp seen
is recorded as client state.
### Response:
def baseline_or_audit(self, allow_deletion=False, audit_only=False):
"""Baseline synchronization or audit.
Both functions are implemented in this routine because audit is a prerequisite
for a baseline sync. In the case of baseline sync the last timestamp seen
is recorded as client state.
"""
action = ('audit' if (audit_only) else 'baseline sync')
self.logger.debug("Starting " + action)
# 0. Sanity checks
if (len(self.mapper) < 1):
raise ClientFatalError(
"No source to destination mapping specified")
if (not audit_only and self.mapper.unsafe()):
raise ClientFatalError(
"Source to destination mappings unsafe: %s" %
str(self.mapper))
# 1. Get inventories from both src and dst
# 1.a source resource list
src_resource_list = self.find_resource_list()
self.logger.info(
"Read source resource list, %d resources listed" %
(len(src_resource_list)))
if (len(src_resource_list) == 0):
raise ClientFatalError(
"Aborting as there are no resources to sync")
if (len(self.hashes) > 0):
self.prune_hashes(src_resource_list.hashes(), 'resource')
# 1.b destination resource list mapped back to source URIs
rlb = ResourceListBuilder(set_hashes=self.hashes, mapper=self.mapper)
dst_resource_list = rlb.from_disk()
# 2. Compare these resource lists respecting any comparison options
(same, updated, deleted, created) = dst_resource_list.compare(src_resource_list)
# 3. Report status and planned actions
self.log_status(in_sync=(len(updated) + len(deleted) + len(created) == 0),
audit=True, same=len(same), created=len(created),
updated=len(updated), deleted=len(deleted))
if (audit_only or len(created) + len(updated) + len(deleted) == 0):
self.logger.debug("Completed " + action)
return
# 4. Check that sitemap has authority over URIs listed
if (not self.noauth):
uauth = UrlAuthority(self.sitemap, strict=self.strictauth)
for resource in src_resource_list:
if (not uauth.has_authority_over(resource.uri)):
raise ClientFatalError(
"Aborting as sitemap (%s) mentions resource at a location it does not have authority over (%s), override with --noauth" %
(self.sitemap, resource.uri))
# 5. Grab files to do sync
delete_msg = (
", and delete %d resources" %
len(deleted)) if (allow_deletion) else ''
self.logger.warning(
"Will GET %d resources%s" %
(len(created) + len(updated), delete_msg))
self.last_timestamp = 0
num_created = 0
num_updated = 0
num_deleted = 0
for resource in created:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
self.logger.info("created: %s -> %s" % (uri, filename))
num_created += self.update_resource(resource, filename, 'created')
for resource in updated:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
self.logger.info("updated: %s -> %s" % (uri, filename))
num_updated += self.update_resource(resource, filename, 'updated')
for resource in deleted:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
num_deleted += self.delete_resource(resource,
filename, allow_deletion)
# 6. Store last timestamp to allow incremental sync
if (not audit_only and self.last_timestamp > 0):
ClientState().set_state(self.sitemap, self.last_timestamp)
self.logger.info(
"Written last timestamp %s for incremental sync" %
(datetime_to_str(
self.last_timestamp)))
# 7. Done
self.log_status(in_sync=(len(updated) + len(deleted) + len(created) == 0),
same=len(same), created=num_created,
updated=num_updated, deleted=num_deleted, to_delete=len(deleted))
self.logger.debug("Completed %s" % (action)) |
def get_vnet(access_token, subscription_id, resource_group, vnet_name):
'''Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/virtualNetworks/', vnet_name,
'?api-version=', NETWORK_API])
return do_get(endpoint, access_token) | Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body. | Below is the instruction that describes the task:
### Input:
Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
### Response:
def get_vnet(access_token, subscription_id, resource_group, vnet_name):
'''Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/virtualNetworks/', vnet_name,
'?api-version=', NETWORK_API])
return do_get(endpoint, access_token) |
def require_session(handler):
""" Decorator to ensure a session is properly in the request """
@functools.wraps(handler)
async def decorated(request: web.Request) -> web.Response:
request_session_token = request.match_info['session']
session = session_from_request(request)
if not session or request_session_token != session.token:
LOG.warning(f"request for invalid session {request_session_token}")
return web.json_response(
data={'error': 'bad-token',
'message': f'No such session {request_session_token}'},
status=404)
return await handler(request, session)
return decorated | Decorator to ensure a session is properly in the request | Below is the instruction that describes the task:
### Input:
Decorator to ensure a session is properly in the request
### Response:
def require_session(handler):
""" Decorator to ensure a session is properly in the request """
@functools.wraps(handler)
async def decorated(request: web.Request) -> web.Response:
request_session_token = request.match_info['session']
session = session_from_request(request)
if not session or request_session_token != session.token:
LOG.warning(f"request for invalid session {request_session_token}")
return web.json_response(
data={'error': 'bad-token',
'message': f'No such session {request_session_token}'},
status=404)
return await handler(request, session)
return decorated |
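A usage sketch for `require_session`: the decorated handler receives both the request and the resolved session, while aiohttp still routes it as a one-argument handler. The route path must contain a `{session}` segment, since the decorator reads `request.match_info['session']`. Names below are hypothetical:

```python
@require_session
async def get_status(request: web.Request, session) -> web.Response:
    return web.json_response({'session': session.token})

# app.router.add_get('/status/{session}', get_status)  # hypothetical route
```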
def _realToVisibleColumn(self, text, realColumn):
"""If \t is used, the real position of a symbol in the block and its visible position differ.
This function converts a real column to a visible column.
"""
generator = self._visibleCharPositionGenerator(text)
for i in range(realColumn):
val = next(generator)
val = next(generator)
return val | If \t is used, the real position of a symbol in the block and its visible position differ.
This function converts a real column to a visible column. | Below is the instruction that describes the task:
### Input:
If \t is used, the real position of a symbol in the block and its visible position differ.
This function converts a real column to a visible column.
### Response:
def _realToVisibleColumn(self, text, realColumn):
"""If \t is used, the real position of a symbol in the block and its visible position differ.
This function converts a real column to a visible column.
"""
generator = self._visibleCharPositionGenerator(text)
for i in range(realColumn):
val = next(generator)
val = next(generator)
return val |
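`_visibleCharPositionGenerator` is not shown in this row; a plausible stand-in (my assumption: tabs expand to the next multiple of a tab width) makes the real-to-visible mapping concrete:

```python
def visible_char_positions(text, tab_width=4):
    # Hypothetical stand-in for _visibleCharPositionGenerator: yields the
    # visible column of each character, expanding '\t' to the next tab stop.
    pos = 0
    for ch in text:
        yield pos
        pos = (pos // tab_width + 1) * tab_width if ch == '\t' else pos + 1
    yield pos  # position just past the last character

positions = list(visible_char_positions("a\tb"))  # [0, 1, 4, 5]
assert positions[2] == 4  # real column 2 (the 'b') is visible column 4
```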
async def get_sound_settings(self, target="") -> List[Setting]:
"""Get the current sound settings.
:param str target: settings target, defaults to all.
"""
res = await self.services["audio"]["getSoundSettings"]({"target": target})
return [Setting.make(**x) for x in res] | Get the current sound settings.
:param str target: settings target, defaults to all. | Below is the instruction that describes the task:
### Input:
Get the current sound settings.
:param str target: settings target, defaults to all.
### Response:
async def get_sound_settings(self, target="") -> List[Setting]:
"""Get the current sound settings.
:param str target: settings target, defaults to all.
"""
res = await self.services["audio"]["getSoundSettings"]({"target": target})
return [Setting.make(**x) for x in res] |
def set_page_artid(self, page_start=None, page_end=None, artid=None):
"""Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueError: when no start_page given for an end_page
"""
if page_end and not page_start:
raise ValueError('End_page provided without start_page')
self._ensure_reference_field('publication_info', {})
publication_info = self.obj['reference']['publication_info']
if page_start:
publication_info['page_start'] = page_start
if page_end:
publication_info['page_end'] = page_end
if artid:
publication_info['artid'] = artid | Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueError: when no start_page given for an end_page | Below is the instruction that describes the task:
### Input:
Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueError: when no start_page given for an end_page
### Response:
def set_page_artid(self, page_start=None, page_end=None, artid=None):
"""Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueError: when no start_page given for an end_page
"""
if page_end and not page_start:
raise ValueError('End_page provided without start_page')
self._ensure_reference_field('publication_info', {})
publication_info = self.obj['reference']['publication_info']
if page_start:
publication_info['page_start'] = page_start
if page_end:
publication_info['page_end'] = page_end
if artid:
publication_info['artid'] = artid |
def internal2external_grad(xi, bounds):
"""
Calculate the internal to external gradient
Calculates the partial of external over internal
"""
ge = np.empty_like(xi)
for i, (v, bound) in enumerate(zip(xi, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a is None and b is None: # No constraints
ge[i] = 1.0
elif b is None: # only min
ge[i] = v / np.sqrt(v ** 2 + 1)
elif a is None: # only max
ge[i] = -v / np.sqrt(v ** 2 + 1)
else: # both min and max
ge[i] = (b - a) * np.cos(v) / 2.
return ge | Calculate the internal to external gradient
Calculates the partial of external over internal | Below is the instruction that describes the task:
### Input:
Calculate the internal to external gradient
Calculates the partial of external over internal
### Response:
def internal2external_grad(xi, bounds):
"""
Calculate the internal to external gradient
Calculates the partial of external over internal
"""
ge = np.empty_like(xi)
for i, (v, bound) in enumerate(zip(xi, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a is None and b is None: # No constraints
ge[i] = 1.0
elif b is None: # only min
ge[i] = v / np.sqrt(v ** 2 + 1)
elif a is None: # only max
ge[i] = -v / np.sqrt(v ** 2 + 1)
else: # both min and max
ge[i] = (b - a) * np.cos(v) / 2.
return ge |
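A quick sketch of how this chain-rule factor is typically used when wrapping a bounded fit; the parameter values and bounds below are made up for illustration and assume numpy plus the function above:

import numpy as np

xi = np.array([0.5, -1.2, 0.3])                   # internal (unconstrained) parameters
bounds = [(None, None), (0.0, None), (0.0, 1.0)]  # free, min-only, min+max
ge = internal2external_grad(xi, bounds)
g_ext = np.array([1.0, 2.0, -0.5])                # gradient in external (bounded) space
g_int = g_ext * ge                                # chain rule back to internal space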
def device_slug_to_id(slug):
"""Convert a d-- device slug to an integer.
Args:
slug (str): A slug in the format d--XXXX-XXXX-XXXX-XXXX
Returns:
int: The device id as an integer
Raises:
ArgumentError: if there is a malformed slug
"""
if not isinstance(slug, str):
raise ArgumentError("Invalid device slug that is not a string", slug=slug)
try:
device_slug = IOTileDeviceSlug(slug, allow_64bits=False)
except ValueError:
raise ArgumentError("Unable to recognize {} as a device id".format(slug))
return device_slug.get_id() | Convert a d-- device slug to an integer.
Args:
slug (str): A slug in the format d--XXXX-XXXX-XXXX-XXXX
Returns:
int: The device id as an integer
Raises:
ArgumentError: if there is a malformed slug | Below is the instruction that describes the task:
### Input:
Convert a d-- device slug to an integer.
Args:
slug (str): A slug in the format d--XXXX-XXXX-XXXX-XXXX
Returns:
int: The device id as an integer
Raises:
ArgumentError: if there is a malformed slug
### Response:
def device_slug_to_id(slug):
"""Convert a d-- device slug to an integer.
Args:
slug (str): A slug in the format d--XXXX-XXXX-XXXX-XXXX
Returns:
int: The device id as an integer
Raises:
ArgumentError: if there is a malformed slug
"""
if not isinstance(slug, str):
raise ArgumentError("Invalid device slug that is not a string", slug=slug)
try:
device_slug = IOTileDeviceSlug(slug, allow_64bits=False)
except ValueError:
raise ArgumentError("Unable to recognize {} as a device id".format(slug))
return device_slug.get_id() |
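A minimal usage sketch; the slug value is hypothetical and this assumes the surrounding module's imports (IOTileDeviceSlug, ArgumentError) are in scope:

device_id = device_slug_to_id('d--0000-0000-0000-0015')
print(device_id)  # the hex groups encode the id, so 0x15 == 21 is expected here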
def _check_reversed(self, node):
""" check that the argument to `reversed` is a sequence """
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if argument._proxied.name == "dict" and utils.is_builtin_object(
argument._proxied
):
self.add_message("bad-reversed-sequence", node=node)
return
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in argument._proxied.ancestors()
):
# Mappings aren't accepted by reversed(), unless
# they provide explicitly a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node) | check that the argument to `reversed` is a sequence | Below is the the instruction that describes the task:
### Input:
check that the argument to `reversed` is a sequence
### Response:
def _check_reversed(self, node):
""" check that the argument to `reversed` is a sequence """
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if argument._proxied.name == "dict" and utils.is_builtin_object(
argument._proxied
):
self.add_message("bad-reversed-sequence", node=node)
return
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in argument._proxied.ancestors()
):
# Mappings aren't accepted by reversed(), unless
# they provide explicitly a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node) |
def process_text_constructor(cleaner: Callable,
tokenizer: Callable,
append_indicators: bool,
start_tok: str,
end_tok: str):
"""Generate a function that will clean and tokenize text."""
def process_text(text):
if append_indicators:
return [[start_tok] + tokenizer(cleaner(doc)) + [end_tok] for doc in text]
return [tokenizer(cleaner(doc)) for doc in text]
return process_text | Generate a function that will clean and tokenize text. | Below is the instruction that describes the task:
### Input:
Generate a function that will clean and tokenize text.
### Response:
def process_text_constructor(cleaner: Callable,
tokenizer: Callable,
append_indicators: bool,
start_tok: str,
end_tok: str):
"""Generate a function that will clean and tokenize text."""
def process_text(text):
if append_indicators:
return [[start_tok] + tokenizer(cleaner(doc)) + [end_tok] for doc in text]
return [tokenizer(cleaner(doc)) for doc in text]
return process_text |
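A self-contained demonstration of the returned closure; str.lower and str.split are toy stand-ins for a real cleaner and tokenizer:

process = process_text_constructor(cleaner=str.lower,
                                   tokenizer=str.split,
                                   append_indicators=True,
                                   start_tok='<s>',
                                   end_tok='</s>')
print(process(['Hello World']))  # [['<s>', 'hello', 'world', '</s>']]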
def _ascii_tree(self, indent: str, no_types: bool, val_count: bool) -> str:
"""Return the receiver's subtree as ASCII art."""
def suffix(sn):
return f" {{{sn.val_count}}}\n" if val_count else "\n"
if not self.children:
return ""
cs = []
for c in self.children:
cs.extend(c._flatten())
cs.sort(key=lambda x: x.qual_name)
res = ""
for c in cs[:-1]:
res += (indent + c._tree_line(no_types) + suffix(c) +
c._ascii_tree(indent + "| ", no_types, val_count))
return (res + indent + cs[-1]._tree_line(no_types) + suffix(cs[-1]) +
cs[-1]._ascii_tree(indent + " ", no_types, val_count)) | Return the receiver's subtree as ASCII art. | Below is the instruction that describes the task:
### Input:
Return the receiver's subtree as ASCII art.
### Response:
def _ascii_tree(self, indent: str, no_types: bool, val_count: bool) -> str:
"""Return the receiver's subtree as ASCII art."""
def suffix(sn):
return f" {{{sn.val_count}}}\n" if val_count else "\n"
if not self.children:
return ""
cs = []
for c in self.children:
cs.extend(c._flatten())
cs.sort(key=lambda x: x.qual_name)
res = ""
for c in cs[:-1]:
res += (indent + c._tree_line(no_types) + suffix(c) +
c._ascii_tree(indent + "| ", no_types, val_count))
return (res + indent + cs[-1]._tree_line(no_types) + suffix(cs[-1]) +
cs[-1]._ascii_tree(indent + " ", no_types, val_count)) |
def use_federated_vault_view(self):
"""Pass through to provider AuthorizationLookupSession.use_federated_vault_view"""
self._vault_view = FEDERATED
# self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_vault_view()
except AttributeError:
pass | Pass through to provider AuthorizationLookupSession.use_federated_vault_view | Below is the instruction that describes the task:
### Input:
Pass through to provider AuthorizationLookupSession.use_federated_vault_view
### Response:
def use_federated_vault_view(self):
"""Pass through to provider AuthorizationLookupSession.use_federated_vault_view"""
self._vault_view = FEDERATED
# self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_vault_view()
except AttributeError:
pass |
def load_nifti(filename, to='auto'):
'''
load_nifti(filename) yields the Nifti1Image or Nifti2Image referenced by the given filename by
using the nibabel load function.
The optional argument to may be used to coerce the resulting data to a particular format; the
following arguments are understood:
* 'header' will yield just the image header
* 'data' will yield the image's data-array
* 'field' will yield a squeezed version of the image's data-array and will raise an error if
the data object has more than 2 non-unitary dimensions (appropriate for loading surface
properties stored in image files)
* 'affine' will yield the image's affine transformation
* 'image' will yield the raw image object
* 'auto' is equivalent to 'image' unless the image has no more than 2 non-unitary dimensions,
in which case it is assumed to be a surface-field and the return value is equivalent to
the 'field' value.
'''
img = nib.load(filename)
to = to.lower()
if to == 'image': return img
elif to == 'data': return img.get_data()
elif to == 'affine': return img.affine
elif to == 'header': return img.header
elif to == 'field':
dat = np.squeeze(np.asarray(img.get_data()))
if len(dat.shape) > 2:
raise ValueError('image requested as field has more than 2 non-unitary dimensions')
return dat
elif to in ['auto', 'automatic']:
dims = set(np.shape(img.get_data()))
if 1 < len(dims) < 4 and 1 in dims:
return np.squeeze(np.asarray(img.get_data()))
else:
return img
else:
raise ValueError('unrecognized \'to\' argument \'%s\'' % to) | load_nifti(filename) yields the Nifti1Image or Nifti2Image referenced by the given filename by
using the nibabel load function.
The optional argument to may be used to coerce the resulting data to a particular format; the
following arguments are understood:
* 'header' will yield just the image header
* 'data' will yield the image's data-array
* 'field' will yield a squeezed version of the image's data-array and will raise an error if
the data object has more than 2 non-unitary dimensions (appropriate for loading surface
properties stored in image files)
* 'affine' will yield the image's affine transformation
* 'image' will yield the raw image object
* 'auto' is equivalent to 'image' unless the image has no more than 2 non-unitary dimensions,
in which case it is assumed to be a surface-field and the return value is equivalent to
the 'field' value. | Below is the instruction that describes the task:
### Input:
load_nifti(filename) yields the Nifti1Image or Nifti2Image referenced by the given filename by
using the nibabel load function.
The optional argument to may be used to coerce the resulting data to a particular format; the
following arguments are understood:
* 'header' will yield just the image header
* 'data' will yield the image's data-array
* 'field' will yield a squeezed version of the image's data-array and will raise an error if
the data object has more than 2 non-unitary dimensions (appropriate for loading surface
properties stored in image files)
* 'affine' will yield the image's affine transformation
* 'image' will yield the raw image object
* 'auto' is equivalent to 'image' unless the image has no more than 2 non-unitary dimensions,
in which case it is assumed to be a surface-field and the return value is equivalent to
the 'field' value.
### Response:
def load_nifti(filename, to='auto'):
'''
load_nifti(filename) yields the Nifti1Image or Nifti2Image referenced by the given filename by
using the nibabel load function.
The optional argument to may be used to coerce the resulting data to a particular format; the
following arguments are understood:
* 'header' will yield just the image header
* 'data' will yield the image's data-array
* 'field' will yield a squeezed version of the image's data-array and will raise an error if
the data object has more than 2 non-unitary dimensions (appropriate for loading surface
properties stored in image files)
* 'affine' will yield the image's affine transformation
* 'image' will yield the raw image object
* 'auto' is equivalent to 'image' unless the image has no more than 2 non-unitary dimensions,
in which case it is assumed to be a surface-field and the return value is equivalent to
the 'field' value.
'''
img = nib.load(filename)
to = to.lower()
if to == 'image': return img
elif to == 'data': return img.get_data()
elif to == 'affine': return img.affine
elif to == 'header': return img.header
elif to == 'field':
dat = np.squeeze(np.asarray(img.get_data()))
if len(dat.shape) > 2:
raise ValueError('image requested as field has more than 2 non-unitary dimensions')
return dat
elif to in ['auto', 'automatic']:
dims = set(np.shape(img.get_data()))
if 1 < len(dims) < 4 and 1 in dims:
return np.squeeze(np.asarray(img.get_data()))
else:
return img
else:
raise ValueError('unrecognized \'to\' argument \'%s\'' % to) |
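A short usage sketch; the file name is hypothetical and nibabel (imported as nib above) must be installed:

img = load_nifti('scan.nii.gz')               # image object, or squeezed array for flat data
hdr = load_nifti('scan.nii.gz', to='header')  # just the NIfTI header
aff = load_nifti('scan.nii.gz', to='affine')  # the 4x4 affine matrix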
def cancel_global_ip(self, global_ip_id):
"""Cancels the specified global IP address.
:param int global_ip_id: The ID of the global IP to be cancelled.
"""
service = self.client['Network_Subnet_IpAddress_Global']
ip_address = service.getObject(id=global_ip_id, mask='billingItem')
billing_id = ip_address['billingItem']['id']
return self.client['Billing_Item'].cancelService(id=billing_id) | Cancels the specified global IP address.
:param int global_ip_id: The ID of the global IP to be cancelled. | Below is the instruction that describes the task:
### Input:
Cancels the specified global IP address.
:param int global_ip_id: The ID of the global IP to be cancelled.
### Response:
def cancel_global_ip(self, global_ip_id):
"""Cancels the specified global IP address.
:param int global_ip_id: The ID of the global IP to be cancelled.
"""
service = self.client['Network_Subnet_IpAddress_Global']
ip_address = service.getObject(id=global_ip_id, mask='billingItem')
billing_id = ip_address['billingItem']['id']
return self.client['Billing_Item'].cancelService(id=billing_id) |
def collect_fields(node):
"""
Get all the unique field names that are eligible for optimization
Requested a function like this be added to the ``info`` object
upstream in graphene_django:
https://github.com/graphql-python/graphene-django/issues/230
"""
fields = set()
for leaf in node:
if leaf.get('kind', None) == "Field":
fields.add(leaf["name"]["value"])
if leaf.get("selection_set", None):
fields = fields.union(collect_fields(leaf["selection_set"]["selections"]))
return fields | Get all the unique field names that are eligible for optimization
Requested a function like this be added to the ``info`` object
upstream in graphene_django:
https://github.com/graphql-python/graphene-django/issues/230 | Below is the instruction that describes the task:
### Input:
Get all the unique field names that are eligible for optimization
Requested a function like this be added to the ``info`` object
upstream in graphene_django:
https://github.com/graphql-python/graphene-django/issues/230
### Response:
def collect_fields(node):
"""
Get all the unique field names that are eligible for optimization
Requested a function like this be added to the ``info`` object
upstream in graphene_django:
https://github.com/graphql-python/graphene-django/issues/230
"""
fields = set()
for leaf in node:
if leaf.get('kind', None) == "Field":
fields.add(leaf["name"]["value"])
if leaf.get("selection_set", None):
fields = fields.union(collect_fields(leaf["selection_set"]["selections"]))
return fields |
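A standalone check of the walker against a hand-built dict that mimics a parsed GraphQL selection set; the exact shape is inferred from the lookups above, so treat it as illustrative:

selections = [
    {'kind': 'Field', 'name': {'value': 'id'}},
    {'kind': 'Field', 'name': {'value': 'author'},
     'selection_set': {'selections': [
         {'kind': 'Field', 'name': {'value': 'name'}}]}},
]
print(collect_fields(selections))  # {'id', 'author', 'name'}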
def key_to_scan_codes(key, error_if_missing=True):
"""
Returns a list of scan codes associated with this key (name or scan code).
"""
if _is_number(key):
return (key,)
elif _is_list(key):
return sum((key_to_scan_codes(i) for i in key), ())
elif not _is_str(key):
raise ValueError('Unexpected key type ' + str(type(key)) + ', value (' + repr(key) + ')')
normalized = normalize_name(key)
if normalized in sided_modifiers:
left_scan_codes = key_to_scan_codes('left ' + normalized, False)
right_scan_codes = key_to_scan_codes('right ' + normalized, False)
return left_scan_codes + tuple(c for c in right_scan_codes if c not in left_scan_codes)
try:
# Put items in ordered dict to remove duplicates.
t = tuple(_collections.OrderedDict((scan_code, True) for scan_code, modifier in _os_keyboard.map_name(normalized)))
e = None
except (KeyError, ValueError) as exception:
t = ()
e = exception
if not t and error_if_missing:
raise ValueError('Key {} is not mapped to any known key.'.format(repr(key)), e)
else:
return t | Returns a list of scan codes associated with this key (name or scan code). | Below is the instruction that describes the task:
### Input:
Returns a list of scan codes associated with this key (name or scan code).
### Response:
def key_to_scan_codes(key, error_if_missing=True):
"""
Returns a list of scan codes associated with this key (name or scan code).
"""
if _is_number(key):
return (key,)
elif _is_list(key):
return sum((key_to_scan_codes(i) for i in key), ())
elif not _is_str(key):
raise ValueError('Unexpected key type ' + str(type(key)) + ', value (' + repr(key) + ')')
normalized = normalize_name(key)
if normalized in sided_modifiers:
left_scan_codes = key_to_scan_codes('left ' + normalized, False)
right_scan_codes = key_to_scan_codes('right ' + normalized, False)
return left_scan_codes + tuple(c for c in right_scan_codes if c not in left_scan_codes)
try:
# Put items in ordered dict to remove duplicates.
t = tuple(_collections.OrderedDict((scan_code, True) for scan_code, modifier in _os_keyboard.map_name(normalized)))
e = None
except (KeyError, ValueError) as exception:
t = ()
e = exception
if not t and error_if_missing:
raise ValueError('Key {} is not mapped to any known key.'.format(repr(key)), e)
else:
return t |
def dump(pif, fp, **kwargs):
"""
Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded text file.
:param pif: Object or list of objects to serialize.
:param fp: File-like object supporting .write() method to write the serialized object(s) to.
:param kwargs: Any options available to json.dump().
"""
return json.dump(pif, fp, cls=PifEncoder, **kwargs) | Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded text file.
:param pif: Object or list of objects to serialize.
:param fp: File-like object supporting .write() method to write the serialized object(s) to.
:param kwargs: Any options available to json.dump(). | Below is the instruction that describes the task:
### Input:
Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded text file.
:param pif: Object or list of objects to serialize.
:param fp: File-like object supporting .write() method to write the serialized object(s) to.
:param kwargs: Any options available to json.dump().
### Response:
def dump(pif, fp, **kwargs):
"""
Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded text file.
:param pif: Object or list of objects to serialize.
:param fp: File-like object supporting .write() method to write the serialized object(s) to.
:param kwargs: Any options available to json.dump().
"""
return json.dump(pif, fp, cls=PifEncoder, **kwargs) |
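Hypothetical usage; `system` stands in for any pypif object supplied by the caller and is not defined here:

with open('record.json', 'w') as fp:
    dump(system, fp, indent=2)  # extra kwargs such as indent flow through to json.dump()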
def get_unit_by_abbreviation(unit_abbreviation, **kwargs):
"""
Returns a single unit by abbreviation. Used as utility function to resolve string to id
"""
try:
if unit_abbreviation is None:
unit_abbreviation = ''
unit_i = db.DBSession.query(Unit).filter(Unit.abbreviation==unit_abbreviation.strip()).one()
return JSONObject(unit_i)
except NoResultFound:
# The unit does not exist
raise ResourceNotFoundError("Unit '%s' not found"%(unit_abbreviation)) | Returns a single unit by abbreviation. Used as utility function to resolve string to id | Below is the instruction that describes the task:
### Input:
Returns a single unit by abbreviation. Used as utility function to resolve string to id
### Response:
def get_unit_by_abbreviation(unit_abbreviation, **kwargs):
"""
Returns a single unit by abbreviation. Used as utility function to resolve string to id
"""
try:
if unit_abbreviation is None:
unit_abbreviation = ''
unit_i = db.DBSession.query(Unit).filter(Unit.abbreviation==unit_abbreviation.strip()).one()
return JSONObject(unit_i)
except NoResultFound:
# The unit does not exist
raise ResourceNotFoundError("Unit '%s' not found"%(unit_abbreviation)) |
def handle_import(self, name, compilation, rule):
"""
Re-implementation of the core Sass import mechanism, which looks for
files using the staticfiles storage and staticfiles finders.
"""
original_path = PurePath(name)
search_exts = list(compilation.compiler.dynamic_extensions)
if original_path.suffix and original_path.suffix in search_exts:
basename = original_path.stem
else:
basename = original_path.name
if original_path.is_absolute():
# Remove the beginning slash
search_path = original_path.relative_to('/').parent
elif rule.source_file.origin:
search_path = rule.source_file.origin
if original_path.parent:
search_path = os.path.normpath(str(search_path / original_path.parent))
else:
search_path = original_path.parent
for prefix, suffix in product(('_', ''), search_exts):
filename = PurePath(prefix + basename + suffix)
full_filename, storage = get_file_and_storage(str(search_path / filename))
if full_filename:
with storage.open(full_filename) as f:
return SourceFile.from_file(f, origin=search_path, relpath=filename) | Re-implementation of the core Sass import mechanism, which looks for
files using the staticfiles storage and staticfiles finders. | Below is the instruction that describes the task:
### Input:
Re-implementation of the core Sass import mechanism, which looks for
files using the staticfiles storage and staticfiles finders.
### Response:
def handle_import(self, name, compilation, rule):
"""
Re-implementation of the core Sass import mechanism, which looks for
files using the staticfiles storage and staticfiles finders.
"""
original_path = PurePath(name)
search_exts = list(compilation.compiler.dynamic_extensions)
if original_path.suffix and original_path.suffix in search_exts:
basename = original_path.stem
else:
basename = original_path.name
if original_path.is_absolute():
# Remove the beginning slash
search_path = original_path.relative_to('/').parent
elif rule.source_file.origin:
search_path = rule.source_file.origin
if original_path.parent:
search_path = os.path.normpath(str(search_path / original_path.parent))
else:
search_path = original_path.parent
for prefix, suffix in product(('_', ''), search_exts):
filename = PurePath(prefix + basename + suffix)
full_filename, storage = get_file_and_storage(str(search_path / filename))
if full_filename:
with storage.open(full_filename) as f:
return SourceFile.from_file(f, origin=search_path, relpath=filename) |
def DefaultEnvironment(*args, **kw):
"""
Initial public entry point for creating the default construction
Environment.
After creating the environment, we overwrite our name
(DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
which more efficiently returns the initialized default construction
environment without checking for its existence.
(This function still exists with its _default_check because someone
else (*cough* Script/__init__.py *cough*) may keep a reference
to this function. So we can't use the fully functional idiom of
having the name originally be a something that *only* creates the
construction environment and then overwrites the name.)
"""
global _default_env
if not _default_env:
import SCons.Util
_default_env = SCons.Environment.Environment(*args, **kw)
if SCons.Util.md5:
_default_env.Decider('MD5')
else:
_default_env.Decider('timestamp-match')
global DefaultEnvironment
DefaultEnvironment = _fetch_DefaultEnvironment
_default_env._CacheDir_path = None
return _default_env | Initial public entry point for creating the default construction
Environment.
After creating the environment, we overwrite our name
(DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
which more efficiently returns the initialized default construction
environment without checking for its existence.
(This function still exists with its _default_check because someone
else (*cough* Script/__init__.py *cough*) may keep a reference
to this function. So we can't use the fully functional idiom of
having the name originally be something that *only* creates the
construction environment and then overwrites the name.) | Below is the instruction that describes the task:
### Input:
Initial public entry point for creating the default construction
Environment.
After creating the environment, we overwrite our name
(DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
which more efficiently returns the initialized default construction
environment without checking for its existence.
(This function still exists with its _default_check because someone
else (*cough* Script/__init__.py *cough*) may keep a reference
to this function. So we can't use the fully functional idiom of
having the name originally be something that *only* creates the
construction environment and then overwrites the name.)
### Response:
def DefaultEnvironment(*args, **kw):
"""
Initial public entry point for creating the default construction
Environment.
After creating the environment, we overwrite our name
(DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
which more efficiently returns the initialized default construction
environment without checking for its existence.
(This function still exists with its _default_check because someone
else (*cough* Script/__init__.py *cough*) may keep a reference
to this function. So we can't use the fully functional idiom of
having the name originally be something that *only* creates the
construction environment and then overwrites the name.)
"""
global _default_env
if not _default_env:
import SCons.Util
_default_env = SCons.Environment.Environment(*args, **kw)
if SCons.Util.md5:
_default_env.Decider('MD5')
else:
_default_env.Decider('timestamp-match')
global DefaultEnvironment
DefaultEnvironment = _fetch_DefaultEnvironment
_default_env._CacheDir_path = None
return _default_env |
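The rebind-your-own-name trick above is a memoization pattern; a generic standalone sketch with all names hypothetical:

_cached = None

def expensive_default():
    # first call does the construction work, then rebinds this module-level
    # name so later callers skip the check entirely
    global _cached, expensive_default
    if _cached is None:
        _cached = object()  # stand-in for the costly setup
    expensive_default = lambda: _cached
    return _cached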
def set_left_table(self, left_table=None):
"""
Sets the left table for this join clause. If no table is specified, the first table
in the query will be used
:type left_table: str or dict or :class:`Table <querybuilder.tables.Table>` or None
:param left_table: The left table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first table
in the query.
"""
if left_table:
self.left_table = TableFactory(
table=left_table,
owner=self.owner,
)
else:
self.left_table = self.get_left_table() | Sets the left table for this join clause. If no table is specified, the first table
in the query will be used
:type left_table: str or dict or :class:`Table <querybuilder.tables.Table>` or None
:param left_table: The left table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first table
in the query. | Below is the instruction that describes the task:
### Input:
Sets the left table for this join clause. If no table is specified, the first table
in the query will be used
:type left_table: str or dict or :class:`Table <querybuilder.tables.Table>` or None
:param left_table: The left table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first table
in the query.
### Response:
def set_left_table(self, left_table=None):
"""
Sets the left table for this join clause. If no table is specified, the first table
in the query will be used
:type left_table: str or dict or :class:`Table <querybuilder.tables.Table>` or None
:param left_table: The left table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first table
in the query.
"""
if left_table:
self.left_table = TableFactory(
table=left_table,
owner=self.owner,
)
else:
self.left_table = self.get_left_table() |
def _datetime_to_rfc3339(value, ignore_zone=True):
"""Convert a timestamp to a string.
:type value: :class:`datetime.datetime`
:param value: The datetime object to be converted to a string.
:type ignore_zone: bool
:param ignore_zone: If True, then the timezone (if any) of the datetime
object is ignored.
:rtype: str
:returns: The string representing the datetime stamp.
"""
if not ignore_zone and value.tzinfo is not None:
# Convert to UTC and remove the time zone info.
value = value.replace(tzinfo=None) - value.utcoffset()
return value.strftime(_RFC3339_MICROS) | Convert a timestamp to a string.
:type value: :class:`datetime.datetime`
:param value: The datetime object to be converted to a string.
:type ignore_zone: bool
:param ignore_zone: If True, then the timezone (if any) of the datetime
object is ignored.
:rtype: str
:returns: The string representing the datetime stamp. | Below is the instruction that describes the task:
### Input:
Convert a timestamp to a string.
:type value: :class:`datetime.datetime`
:param value: The datetime object to be converted to a string.
:type ignore_zone: bool
:param ignore_zone: If True, then the timezone (if any) of the datetime
object is ignored.
:rtype: str
:returns: The string representing the datetime stamp.
### Response:
def _datetime_to_rfc3339(value, ignore_zone=True):
"""Convert a timestamp to a string.
:type value: :class:`datetime.datetime`
:param value: The datetime object to be converted to a string.
:type ignore_zone: bool
:param ignore_zone: If True, then the timezone (if any) of the datetime
object is ignored.
:rtype: str
:returns: The string representing the datetime stamp.
"""
if not ignore_zone and value.tzinfo is not None:
# Convert to UTC and remove the time zone info.
value = value.replace(tzinfo=None) - value.utcoffset()
return value.strftime(_RFC3339_MICROS) |
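A worked example of the zone-stripping arithmetic; the format string below is an assumption about what the module's _RFC3339_MICROS constant holds:

import datetime

_RFC3339_MICROS = '%Y-%m-%dT%H:%M:%S.%fZ'  # assumed value of the module constant

tz = datetime.timezone(datetime.timedelta(hours=2))
aware = datetime.datetime(2020, 1, 1, 12, 0, tzinfo=tz)
# replace(tzinfo=None) - utcoffset() shifts 12:00+02:00 to 10:00 UTC
print(_datetime_to_rfc3339(aware, ignore_zone=False))  # 2020-01-01T10:00:00.000000Z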
def _is_valid_imaging_dicom(dicom_header):
"""
Function will do some basic checks to see if this is a valid imaging dicom
"""
# if it is philips and multiframe dicom then we assume it is ok
try:
if common.is_philips([dicom_header]):
if common.is_multiframe_dicom([dicom_header]):
return True
if "SeriesInstanceUID" not in dicom_header:
return False
if "InstanceNumber" not in dicom_header:
return False
if "ImageOrientationPatient" not in dicom_header or len(dicom_header.ImageOrientationPatient) < 6:
return False
if "ImagePositionPatient" not in dicom_header or len(dicom_header.ImagePositionPatient) < 3:
return False
# for all others if there is image position patient we assume it is ok
if Tag(0x0020, 0x0037) not in dicom_header:
return False
return True
except (KeyError, AttributeError):
return False | Function will do some basic checks to see if this is a valid imaging dicom | Below is the instruction that describes the task:
### Input:
Function will do some basic checks to see if this is a valid imaging dicom
### Response:
def _is_valid_imaging_dicom(dicom_header):
"""
Function will do some basic checks to see if this is a valid imaging dicom
"""
# if it is philips and multiframe dicom then we assume it is ok
try:
if common.is_philips([dicom_header]):
if common.is_multiframe_dicom([dicom_header]):
return True
if "SeriesInstanceUID" not in dicom_header:
return False
if "InstanceNumber" not in dicom_header:
return False
if "ImageOrientationPatient" not in dicom_header or len(dicom_header.ImageOrientationPatient) < 6:
return False
if "ImagePositionPatient" not in dicom_header or len(dicom_header.ImagePositionPatient) < 3:
return False
# for all others if there is image position patient we assume it is ok
if Tag(0x0020, 0x0037) not in dicom_header:
return False
return True
except (KeyError, AttributeError):
return False |
def nlmsg_reserve(n, len_, pad):
"""Reserve room for additional data in a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407
Reserves room for additional data at the tail of the an existing netlink message. Eventual padding required will be
zeroed out.
bytearray_ptr() at the start of additional data or None.
"""
nlmsg_len_ = n.nm_nlh.nlmsg_len
tlen = len_ if not pad else ((len_ + (pad - 1)) & ~(pad - 1))
if tlen + nlmsg_len_ > n.nm_size:
return None
buf = bytearray_ptr(n.nm_nlh.bytearray, nlmsg_len_)
n.nm_nlh.nlmsg_len += tlen
if tlen > len_:
bytearray_ptr(buf, len_, tlen)[:] = bytearray(b'\0') * (tlen - len_)
_LOGGER.debug('msg 0x%x: Reserved %d (%d) bytes, pad=%d, nlmsg_len=%d', id(n), tlen, len_, pad, n.nm_nlh.nlmsg_len)
return buf | Reserve room for additional data in a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407
Reserves room for additional data at the tail of an existing netlink message. Eventual padding required will be
zeroed out.
Returns: bytearray_ptr() at the start of additional data or None. | Below is the instruction that describes the task:
### Input:
Reserve room for additional data in a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407
Reserves room for additional data at the tail of an existing netlink message. Eventual padding required will be
zeroed out.
Returns: bytearray_ptr() at the start of additional data or None.
### Response:
def nlmsg_reserve(n, len_, pad):
"""Reserve room for additional data in a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407
Reserves room for additional data at the tail of an existing netlink message. Eventual padding required will be
zeroed out.
Returns: bytearray_ptr() at the start of additional data or None.
"""
nlmsg_len_ = n.nm_nlh.nlmsg_len
tlen = len_ if not pad else ((len_ + (pad - 1)) & ~(pad - 1))
if tlen + nlmsg_len_ > n.nm_size:
return None
buf = bytearray_ptr(n.nm_nlh.bytearray, nlmsg_len_)
n.nm_nlh.nlmsg_len += tlen
if tlen > len_:
bytearray_ptr(buf, len_, tlen)[:] = bytearray(b'\0') * (tlen - len_)
_LOGGER.debug('msg 0x%x: Reserved %d (%d) bytes, pad=%d, nlmsg_len=%d', id(n), tlen, len_, pad, n.nm_nlh.nlmsg_len)
return buf |
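The expression (len_ + (pad - 1)) & ~(pad - 1) rounds len_ up to the next multiple of pad, valid when pad is a power of two; a standalone check of the arithmetic:

def align_up(length, pad):
    # round length up to the next multiple of pad (pad must be a power of two)
    return (length + (pad - 1)) & ~(pad - 1)

print(align_up(5, 4))  # 8
print(align_up(8, 4))  # 8 (already aligned)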
def sis_metadata(self):
"""Return Olympus SIS metadata from SIS and INI tags as dict."""
if not self.is_sis:
return None
tags = self.pages[0].tags
result = {}
try:
result.update(tags['OlympusINI'].value)
except Exception:
pass
try:
result.update(tags['OlympusSIS'].value)
except Exception:
pass
return result | Return Olympus SIS metadata from SIS and INI tags as dict. | Below is the instruction that describes the task:
### Input:
Return Olympus SIS metadata from SIS and INI tags as dict.
### Response:
def sis_metadata(self):
"""Return Olympus SIS metadata from SIS and INI tags as dict."""
if not self.is_sis:
return None
tags = self.pages[0].tags
result = {}
try:
result.update(tags['OlympusINI'].value)
except Exception:
pass
try:
result.update(tags['OlympusSIS'].value)
except Exception:
pass
return result |
def unregister_model(self, storagemodel:object, modeldefinition = None, delete_queue=False):
""" clear up an Queueservice for an StorageQueueModel in your Azure Storage Account
Will delete the hole Queue if delete_queue Flag is True!
required Parameter is:
- storagemodel: StorageQueueModel(Object)
Optional Parameter is:
- delete_queue: bool
"""
""" remove from modeldefinitions """
for i in range(len(self._modeldefinitions)):
if self._modeldefinitions[i]['modelname'] == modeldefinition['modelname']:
del self._modeldefinitions[i]
break
""" delete queue from storage if delete_queue == True """
if delete_queue:
self.__deletequeue__(modeldefinition)
log.info('model {} unregistered successfully. Models are {!s}'.format(modeldefinition['modelname'], [model['modelname'] for model in self._modeldefinitions]))
pass | clear up a Queueservice for a StorageQueueModel in your Azure Storage Account
Will delete the whole queue if the delete_queue flag is True!
required Parameter is:
- storagemodel: StorageQueueModel(Object)
Optional Parameter is:
- delete_queue: bool | Below is the instruction that describes the task:
### Input:
clear up a Queueservice for a StorageQueueModel in your Azure Storage Account
Will delete the whole queue if the delete_queue flag is True!
required Parameter is:
- storagemodel: StorageQueueModel(Object)
Optional Parameter is:
- delete_queue: bool
### Response:
def unregister_model(self, storagemodel:object, modeldefinition = None, delete_queue=False):
""" clear up an Queueservice for an StorageQueueModel in your Azure Storage Account
Will delete the hole Queue if delete_queue Flag is True!
required Parameter is:
- storagemodel: StorageQueueModel(Object)
Optional Parameter is:
- delete_queue: bool
"""
""" remove from modeldefinitions """
for i in range(len(self._modeldefinitions)):
if self._modeldefinitions[i]['modelname'] == modeldefinition['modelname']:
del self._modeldefinitions[i]
break
""" delete queue from storage if delete_queue == True """
if delete_queue:
self.__deletequeue__(modeldefinition)
log.info('model {} unregistered successfully. Models are {!s}'.format(modeldefinition['modelname'], [model['modelname'] for model in self._modeldefinitions]))
pass |
def data_received(self, data):
"""Add incoming data to buffer."""
data = data.decode('ascii')
self.log.debug('received data: %s', data)
self.telegram_buffer.append(data)
for telegram in self.telegram_buffer.get_all():
self.handle_telegram(telegram) | Add incoming data to buffer. | Below is the instruction that describes the task:
### Input:
Add incoming data to buffer.
### Response:
def data_received(self, data):
"""Add incoming data to buffer."""
data = data.decode('ascii')
self.log.debug('received data: %s', data)
self.telegram_buffer.append(data)
for telegram in self.telegram_buffer.get_all():
self.handle_telegram(telegram) |
def atlasdb_open( path ):
"""
Open the atlas db.
Return a connection.
Return None if it doesn't exist
"""
if not os.path.exists(path):
log.debug("Atlas DB doesn't exist at %s" % path)
return None
con = sqlite3.connect( path, isolation_level=None )
con.row_factory = atlasdb_row_factory
return con | Open the atlas db.
Return a connection.
Return None if it doesn't exist | Below is the instruction that describes the task:
### Input:
Open the atlas db.
Return a connection.
Return None if it doesn't exist
### Response:
def atlasdb_open( path ):
"""
Open the atlas db.
Return a connection.
Return None if it doesn't exist
"""
if not os.path.exists(path):
log.debug("Atlas DB doesn't exist at %s" % path)
return None
con = sqlite3.connect( path, isolation_level=None )
con.row_factory = atlasdb_row_factory
return con |
def validate(self):
"""
validate whether value in config file is correct.
"""
spec = self._create_specs()
# support in future
functions = {}
validator = validate.Validator(functions=functions)
self.config.configspec = spec
result = self.config.validate(validator, preserve_errors=True)
if self._parse_result(result):
return True | validate whether value in config file is correct. | Below is the instruction that describes the task:
### Input:
validate whether value in config file is correct.
### Response:
def validate(self):
"""
validate whether value in config file is correct.
"""
spec = self._create_specs()
# support in future
functions = {}
validator = validate.Validator(functions=functions)
self.config.configspec = spec
result = self.config.validate(validator, preserve_errors=True)
if self._parse_result(result):
return True |
def add_table(self, rows, cols, left, top, width, height):
"""
Add a |GraphicFrame| object containing a table with the specified
number of *rows* and *cols* and the specified position and size.
*width* is evenly distributed between the columns of the new table.
Likewise, *height* is evenly distributed between the rows. Note that
the ``.table`` property on the returned |GraphicFrame| shape must be
used to access the enclosed |Table| object.
"""
graphicFrame = self._add_graphicFrame_containing_table(
rows, cols, left, top, width, height
)
graphic_frame = self._shape_factory(graphicFrame)
return graphic_frame | Add a |GraphicFrame| object containing a table with the specified
number of *rows* and *cols* and the specified position and size.
*width* is evenly distributed between the columns of the new table.
Likewise, *height* is evenly distributed between the rows. Note that
the ``.table`` property on the returned |GraphicFrame| shape must be
used to access the enclosed |Table| object. | Below is the instruction that describes the task:
### Input:
Add a |GraphicFrame| object containing a table with the specified
number of *rows* and *cols* and the specified position and size.
*width* is evenly distributed between the columns of the new table.
Likewise, *height* is evenly distributed between the rows. Note that
the ``.table`` property on the returned |GraphicFrame| shape must be
used to access the enclosed |Table| object.
### Response:
def add_table(self, rows, cols, left, top, width, height):
"""
Add a |GraphicFrame| object containing a table with the specified
number of *rows* and *cols* and the specified position and size.
*width* is evenly distributed between the columns of the new table.
Likewise, *height* is evenly distributed between the rows. Note that
the ``.table`` property on the returned |GraphicFrame| shape must be
used to access the enclosed |Table| object.
"""
graphicFrame = self._add_graphicFrame_containing_table(
rows, cols, left, top, width, height
)
graphic_frame = self._shape_factory(graphicFrame)
return graphic_frame |
def strToBytes(value):
'''
:type value: ``str``
:param value: value to encode
'''
kassert.is_of_types(value, (bytes, bytearray, six.string_types))
if isinstance(value, six.string_types):
return bytes(bytearray([ord(x) for x in value]))
elif isinstance(value, bytearray):
return bytes(value)
return value | :type value: ``str``
:param value: value to encode | Below is the instruction that describes the task:
### Input:
:type value: ``str``
:param value: value to encode
### Response:
def strToBytes(value):
'''
:type value: ``str``
:param value: value to encode
'''
kassert.is_of_types(value, (bytes, bytearray, six.string_types))
if isinstance(value, six.string_types):
return bytes(bytearray([ord(x) for x in value]))
elif isinstance(value, bytearray):
return bytes(value)
return value |
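A round-trip sanity check, assuming the surrounding module's kassert and six imports are available; all three input kinds normalize to the same bytes value:

assert strToBytes('abc') == b'abc'
assert strToBytes(bytearray(b'abc')) == b'abc'
assert strToBytes(b'abc') == b'abc'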
def replace_vertex_references(self, mask):
"""
Replace the vertex index references in every entity.
Parameters
------------
mask : (len(self.vertices), ) int
Contains new vertex indexes
Alters
------------
entity.points in self.entities
Replaced by mask[entity.points]
"""
for entity in self.entities:
entity.points = mask[entity.points] | Replace the vertex index references in every entity.
Parameters
------------
mask : (len(self.vertices), ) int
Contains new vertex indexes
Alters
------------
entity.points in self.entities
Replaced by mask[entity.points] | Below is the instruction that describes the task:
### Input:
Replace the vertex index references in every entity.
Parameters
------------
mask : (len(self.vertices), ) int
Contains new vertex indexes
Alters
------------
entity.points in self.entities
Replaced by mask[entity.points]
### Response:
def replace_vertex_references(self, mask):
"""
Replace the vertex index references in every entity.
Parameters
------------
mask : (len(self.vertices), ) int
Contains new vertex indexes
Alters
------------
entity.points in self.entities
Replaced by mask[entity.points]
"""
for entity in self.entities:
entity.points = mask[entity.points] |
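The mask acts as an old-index-to-new-index lookup table; a standalone numpy illustration of the same fancy-indexing trick:

import numpy as np

mask = np.array([2, 0, 1])       # old vertex i becomes vertex mask[i]
points = np.array([0, 2, 2, 1])  # one entity's vertex references
print(mask[points])              # [2 1 1 0]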
def add_attribute(self, attrkey, attrvalue, append=False, oldvalue=None):
"""
Add an attribute to this feature.
Feature attributes are stored as nested dictionaries.
Each feature can only have one ID, so ID attribute mapping is 'string'
to 'string'. All other attributes can have multiple values, so mapping
is 'string' to 'dict of strings'.
By default, adding an attribute that already exists will cause the old
value to be overwritten. If the `append` option is true, the new
attribute value will not overwrite the old value, but will be appended
as a second value. (Note: ID attributes can have only 1 value.)
If the `oldvalue` option is set, the new value will replace the old
value. This is necessary for updating an attribute that has multiple
values without completely overwriting all old values. (Note: The
`append` option is ignored when `oldvalue` is set.)
"""
# Handle ID/Parent relationships
if attrkey == 'ID':
if self.children is not None:
oldid = self.get_attribute('ID')
for child in self.children:
child.add_attribute('Parent', attrvalue,
oldvalue=oldid)
self._attrs[attrkey] = attrvalue
if self.is_multi:
self.multi_rep._attrs[attrkey] = attrvalue
for sibling in self.multi_rep.siblings:
sibling._attrs[attrkey] = attrvalue
return
# Handle all other attribute types
if oldvalue is not None:
if attrkey in self._attrs:
assert oldvalue in self._attrs[attrkey]
del self._attrs[attrkey][oldvalue]
if attrkey not in self._attrs or append is False:
self._attrs[attrkey] = dict()
self._attrs[attrkey][attrvalue] = True | Add an attribute to this feature.
Feature attributes are stored as nested dictionaries.
Each feature can only have one ID, so ID attribute mapping is 'string'
to 'string'. All other attributes can have multiple values, so mapping
is 'string' to 'dict of strings'.
By default, adding an attribute that already exists will cause the old
value to be overwritten. If the `append` option is true, the new
attribute value will not overwrite the old value, but will be appended
as a second value. (Note: ID attributes can have only 1 value.)
If the `oldvalue` option is set, the new value will replace the old
value. This is necessary for updating an attribute that has multiple
values without completely overwriting all old values. (Note: The
`append` option is ignored when `oldvalue` is set.) | Below is the instruction that describes the task:
### Input:
Add an attribute to this feature.
Feature attributes are stored as nested dictionaries.
Each feature can only have one ID, so ID attribute mapping is 'string'
to 'string'. All other attributes can have multiple values, so mapping
is 'string' to 'dict of strings'.
By default, adding an attribute that already exists will cause the old
value to be overwritten. If the `append` option is true, the new
attribute value will not overwrite the old value, but will be appended
as a second value. (Note: ID attributes can have only 1 value.)
If the `oldvalue` option is set, the new value will replace the old
value. This is necessary for updating an attribute that has multiple
values without completely overwriting all old values. (Note: The
`append` option is ignored when `oldvalue` is set.)
### Response:
def add_attribute(self, attrkey, attrvalue, append=False, oldvalue=None):
"""
Add an attribute to this feature.
Feature attributes are stored as nested dictionaries.
Each feature can only have one ID, so ID attribute mapping is 'string'
to 'string'. All other attributes can have multiple values, so mapping
is 'string' to 'dict of strings'.
By default, adding an attribute that already exists will cause the old
value to be overwritten. If the `append` option is true, the new
attribute value will not overwrite the old value, but will be appended
as a second value. (Note: ID attributes can have only 1 value.)
If the `oldvalue` option is set, the new value will replace the old
value. This is necessary for updating an attribute that has multiple
values without completely overwriting all old values. (Note: The
`append` option is ignored when `oldvalue` is set.)
"""
# Handle ID/Parent relationships
if attrkey == 'ID':
if self.children is not None:
oldid = self.get_attribute('ID')
for child in self.children:
child.add_attribute('Parent', attrvalue,
oldvalue=oldid)
self._attrs[attrkey] = attrvalue
if self.is_multi:
self.multi_rep._attrs[attrkey] = attrvalue
for sibling in self.multi_rep.siblings:
sibling._attrs[attrkey] = attrvalue
return
# Handle all other attribute types
if oldvalue is not None:
if attrkey in self._attrs:
assert oldvalue in self._attrs[attrkey]
del self._attrs[attrkey][oldvalue]
if attrkey not in self._attrs or append is False:
self._attrs[attrkey] = dict()
self._attrs[attrkey][attrvalue] = True |
def get_package_version(path):
'''Extracts the version'''
with open(VERSION_FILE, "rt") as f:
verstrline = f.read()
VERSION = r"^version = ['\"]([^'\"]*)['\"]"
results = re.search(VERSION, verstrline, re.M)
if results:
version = results.group(1)
else:
raise RuntimeError("Unable to find version string in {}.".format(path))
return version | Extracts the version | Below is the instruction that describes the task:
### Input:
Extracts the version
### Response:
def get_package_version(path):
'''Extracts the version'''
with open(VERSION_FILE, "rt") as f:
verstrline = f.read()
VERSION = r"^version = ['\"]([^'\"]*)['\"]"
results = re.search(VERSION, verstrline, re.M)
if results:
version = results.group(1)
else:
raise RuntimeError("Unable to find version string in {}.".format(path))
return version |
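The regex can be exercised on its own; the sample file contents below are made up for illustration:

import re

verstrline = 'name = "demo"\nversion = "1.4.2"\n'
match = re.search(r"^version = ['\"]([^'\"]*)['\"]", verstrline, re.M)
print(match.group(1))  # 1.4.2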
def total_bytes_billed(self):
"""Return total bytes billed from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled
:rtype: int or None
:returns: total bytes billed by the job, or None if job is not
yet complete.
"""
result = self._job_statistics().get("totalBytesBilled")
if result is not None:
result = int(result)
return result | Return total bytes billed from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled
:rtype: int or None
:returns: total bytes billed by the job, or None if job is not
yet complete. | Below is the instruction that describes the task:
### Input:
Return total bytes billed from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled
:rtype: int or None
:returns: total bytes billed by the job, or None if job is not
yet complete.
### Response:
def total_bytes_billed(self):
"""Return total bytes billed from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled
:rtype: int or None
:returns: total bytes billed by the job, or None if job is not
yet complete.
"""
result = self._job_statistics().get("totalBytesBilled")
if result is not None:
result = int(result)
return result |
def formatException(self, ei):
'''Indent traceback info for better readability'''
out = super(CliFormatter, self).formatException(ei)
return b'│' + format_multiline(out) | Indent traceback info for better readability | Below is the instruction that describes the task:
### Input:
Indent traceback info for better readability
### Response:
def formatException(self, ei):
'''Indent traceback info for better readability'''
out = super(CliFormatter, self).formatException(ei)
return b'│' + format_multiline(out) |
def update_in_ioloop(update):
"""Decorator that ensures an update() method is run in the tornado ioloop.
Does this by checking the thread identity. Requires that the object to
which the method is bound has the attributes :attr:`_ioloop_thread_id`
(the result of thread.get_ident() in the ioloop thread) and :attr:`ioloop`
(the ioloop instance in use). Also assumes the signature
`update(self, sensor, reading)` for the method.
"""
@wraps(update)
def wrapped_update(self, sensor, reading):
if get_thread_ident() == self._ioloop_thread_id:
update(self, sensor, reading)
else:
self.ioloop.add_callback(update, self, sensor, reading)
return wrapped_update | Decorator that ensures an update() method is run in the tornado ioloop.
Does this by checking the thread identity. Requires that the object to
which the method is bound has the attributes :attr:`_ioloop_thread_id`
(the result of thread.get_ident() in the ioloop thread) and :attr:`ioloop`
(the ioloop instance in use). Also assumes the signature
`update(self, sensor, reading)` for the method. | Below is the instruction that describes the task:
### Input:
Decorator that ensures an update() method is run in the tornado ioloop.
Does this by checking the thread identity. Requires that the object to
which the method is bound has the attributes :attr:`_ioloop_thread_id`
(the result of thread.get_ident() in the ioloop thread) and :attr:`ioloop`
(the ioloop instance in use). Also assumes the signature
`update(self, sensor, reading)` for the method.
### Response:
def update_in_ioloop(update):
"""Decorator that ensures an update() method is run in the tornado ioloop.
Does this by checking the thread identity. Requires that the object to
which the method is bound has the attributes :attr:`_ioloop_thread_id`
(the result of thread.get_ident() in the ioloop thread) and :attr:`ioloop`
(the ioloop instance in use). Also assumes the signature
`update(self, sensor, reading)` for the method.
"""
@wraps(update)
def wrapped_update(self, sensor, reading):
if get_thread_ident() == self._ioloop_thread_id:
update(self, sensor, reading)
else:
self.ioloop.add_callback(update, self, sensor, reading)
return wrapped_update |
def _store_compound_info(self):
"""Update the compound_info dictionary with the current chunk of compound details
Note that we use the inchikey as unique identifier. If we can't find an appropriate inchikey we just use
a random string (uuid4) prefixed with UNKNOWN
"""
other_name_l = [name for name in self.other_names if name != self.compound_info['name']]
self.compound_info['other_names'] = ' <#> '.join(other_name_l)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['pubchem_id'], 'cid', 0)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['smiles'], 'smiles', 0)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['name'], 'name', 0)
if not self.compound_info['inchikey_id']:
print("WARNING, can't get inchi key for ", self.compound_info)
print(self.meta_info)
print('#########################')
self.compound_info['inchikey_id'] = 'UNKNOWN_' + str(uuid.uuid4())
if not self.compound_info['pubchem_id'] and self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['inchikey_id'], 'inchikey', 0)
if not self.compound_info['name']:
self.compound_info['name'] = 'unknown name'
if not self.compound_info['inchikey_id'] in self.compound_ids:
self.compound_info_all.append(tuple(self.compound_info.values()) + (
str(datetime.datetime.now()),
str(datetime.datetime.now()),
))
self.compound_ids.append(self.compound_info['inchikey_id']) | Update the compound_info dictionary with the current chunk of compound details
Note that we use the inchikey as unique identifier. If we can't find an appropriate inchikey we just use
a random string (uuid4) prefixed with UNKNOWN | Below is the instruction that describes the task:
### Input:
Update the compound_info dictionary with the current chunk of compound details
Note that we use the inchikey as unique identifier. If we can't find an appropriate inchikey we just use
a random string (uuid4) prefixed with UNKNOWN
### Response:
def _store_compound_info(self):
"""Update the compound_info dictionary with the current chunk of compound details
Note that we use the inchikey as unique identifier. If we can't find an appropriate inchikey we just use
a random string (uuid4) prefixed with UNKNOWN
"""
other_name_l = [name for name in self.other_names if name != self.compound_info['name']]
self.compound_info['other_names'] = ' <#> '.join(other_name_l)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['pubchem_id'], 'cid', 0)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['smiles'], 'smiles', 0)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['name'], 'name', 0)
if not self.compound_info['inchikey_id']:
print("WARNING, can't get inchi key for ", self.compound_info)
print(self.meta_info)
print('#########################')
self.compound_info['inchikey_id'] = 'UNKNOWN_' + str(uuid.uuid4())
if not self.compound_info['pubchem_id'] and self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['inchikey_id'], 'inchikey', 0)
if not self.compound_info['name']:
self.compound_info['name'] = 'unknown name'
if not self.compound_info['inchikey_id'] in self.compound_ids:
self.compound_info_all.append(tuple(self.compound_info.values()) + (
str(datetime.datetime.now()),
str(datetime.datetime.now()),
))
self.compound_ids.append(self.compound_info['inchikey_id']) |
def _get_offsets(self, aref, dim=0):
"""
Return a tuple of offsets of an ArrayRef object in all dimensions.
The index order is right to left (c-code order).
e.g. c[i+1][j-2] -> (-2, +1)
If aref is actually a c_ast.ID, None will be returned.
"""
if isinstance(aref, c_ast.ID):
return None
# Check for restrictions
assert type(aref.name) in [c_ast.ArrayRef, c_ast.ID], \
"array references must only be used with variables or other array references"
assert type(aref.subscript) in [c_ast.ID, c_ast.Constant, c_ast.BinaryOp], \
'array subscript must only contain variables or binary operations'
# Convert subscript to sympy and append
idxs = [self.conv_ast_to_sym(aref.subscript)]
# Check for more indices (multi-dimensional access)
if type(aref.name) is c_ast.ArrayRef:
idxs += self._get_offsets(aref.name, dim=dim+1)
# Reverse to preserve order (the subscripts in the AST are traversed backwards)
if dim == 0:
idxs.reverse()
return tuple(idxs) | Return a tuple of offsets of an ArrayRef object in all dimensions.
The index order is right to left (c-code order).
e.g. c[i+1][j-2] -> (-2, +1)
If aref is actually a c_ast.ID, None will be returned. | Below is the instruction that describes the task:
### Input:
Return a tuple of offsets of an ArrayRef object in all dimensions.
The index order is right to left (c-code order).
e.g. c[i+1][j-2] -> (-2, +1)
If aref is actually a c_ast.ID, None will be returned.
### Response:
def _get_offsets(self, aref, dim=0):
"""
Return a tuple of offsets of an ArrayRef object in all dimensions.
The index order is right to left (c-code order).
e.g. c[i+1][j-2] -> (-2, +1)
If aref is actually a c_ast.ID, None will be returned.
"""
if isinstance(aref, c_ast.ID):
return None
# Check for restrictions
assert type(aref.name) in [c_ast.ArrayRef, c_ast.ID], \
"array references must only be used with variables or other array references"
assert type(aref.subscript) in [c_ast.ID, c_ast.Constant, c_ast.BinaryOp], \
'array subscript must only contain variables or binary operations'
# Convert subscript to sympy and append
idxs = [self.conv_ast_to_sym(aref.subscript)]
# Check for more indices (multi-dimensional access)
if type(aref.name) is c_ast.ArrayRef:
idxs += self._get_offsets(aref.name, dim=dim+1)
# Reverse to preserve order (the subscripts in the AST are traversed backwards)
if dim == 0:
idxs.reverse()
return tuple(idxs) |
def ret_list_minions(self):
'''
Return minions that match via list
'''
tgt = _tgt_set(self.tgt)
return self._ret_minions(tgt.intersection) | Return minions that match via list | Below is the instruction that describes the task:
### Input:
Return minions that match via list
### Response:
def ret_list_minions(self):
'''
Return minions that match via list
'''
tgt = _tgt_set(self.tgt)
return self._ret_minions(tgt.intersection) |
def find_all(self, predicate):
"""Returns a generator that produces a sequence of Entry objects for which the predicate returned True.
Args:
predicate: A callable that returns a value coercible to bool.
"""
for _nid, entry in self._registry.items():
if predicate(entry):
yield entry | Returns a generator that produces a sequence of Entry objects for which the predicate returned True.
Args:
predicate: A callable that returns a value coercible to bool. | Below is the instruction that describes the task:
### Input:
Returns a generator that produces a sequence of Entry objects for which the predicate returned True.
Args:
predicate: A callable that returns a value coercible to bool.
### Response:
def find_all(self, predicate):
"""Returns a generator that produces a sequence of Entry objects for which the predicate returned True.
Args:
predicate: A callable that returns a value coercible to bool.
"""
for _nid, entry in self._registry.items():
if predicate(entry):
yield entry |
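Since the generator only needs a `_registry` mapping, it is easy to exercise with a throwaway stand-in class (hypothetical; the real Entry and registry types are not shown here):
class Registry:
    def __init__(self, entries):
        self._registry = dict(enumerate(entries))  # nid -> entry
    find_all = find_all  # reuse the function above as a method

reg = Registry(['alpha', 'beta', 'bravo'])
print(list(reg.find_all(lambda e: e.startswith('b'))))  # ['beta', 'bravo']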
def transformFromNative(cls, obj):
"""
Convert the list of tuples in obj.value to strings.
"""
if obj.isNative:
obj.isNative = False
transformed = []
for tup in obj.value:
transformed.append(periodToString(tup, cls.forceUTC))
if len(transformed) > 0:
tzid = TimezoneComponent.registerTzinfo(tup[0].tzinfo)
if not cls.forceUTC and tzid is not None:
obj.tzid_param = tzid
obj.value = ','.join(transformed)
return obj | Convert the list of tuples in obj.value to strings. | Below is the instruction that describes the task:
### Input:
Convert the list of tuples in obj.value to strings.
### Response:
def transformFromNative(cls, obj):
"""
Convert the list of tuples in obj.value to strings.
"""
if obj.isNative:
obj.isNative = False
transformed = []
for tup in obj.value:
transformed.append(periodToString(tup, cls.forceUTC))
if len(transformed) > 0:
tzid = TimezoneComponent.registerTzinfo(tup[0].tzinfo)
if not cls.forceUTC and tzid is not None:
obj.tzid_param = tzid
obj.value = ','.join(transformed)
return obj |
def cell_nodes_x(self):
"""The unstructured x-boundaries with shape (N, m) where m > 2"""
decoder = self.decoder
xcoord = self.xcoord
data = self.data
xbounds = decoder.get_cell_node_coord(
data, coords=data.coords, axis='x')
if self.plotter.convert_radian:
xbounds = convert_radian(xbounds, xcoord, xbounds)
return xbounds.values | The unstructured x-boundaries with shape (N, m) where m > 2 | Below is the instruction that describes the task:
### Input:
The unstructured x-boundaries with shape (N, m) where m > 2
### Response:
def cell_nodes_x(self):
"""The unstructured x-boundaries with shape (N, m) where m > 2"""
decoder = self.decoder
xcoord = self.xcoord
data = self.data
xbounds = decoder.get_cell_node_coord(
data, coords=data.coords, axis='x')
if self.plotter.convert_radian:
xbounds = convert_radian(xbounds, xcoord, xbounds)
return xbounds.values |
def _retry_task(provider, job_descriptor, task_id, task_attempt):
"""Retry task_id (numeric id) assigning it task_attempt."""
td_orig = job_descriptor.find_task_descriptor(task_id)
new_task_descriptors = [
job_model.TaskDescriptor({
'task-id': task_id,
'task-attempt': task_attempt
}, td_orig.task_params, td_orig.task_resources)
]
# Update the logging path.
_resolve_task_resources(job_descriptor.job_metadata,
job_descriptor.job_resources, new_task_descriptors)
provider.submit_job(
job_model.JobDescriptor(
job_descriptor.job_metadata, job_descriptor.job_params,
job_descriptor.job_resources, new_task_descriptors), False) | Retry task_id (numeric id) assigning it task_attempt. | Below is the instruction that describes the task:
### Input:
Retry task_id (numeric id) assigning it task_attempt.
### Response:
def _retry_task(provider, job_descriptor, task_id, task_attempt):
"""Retry task_id (numeric id) assigning it task_attempt."""
td_orig = job_descriptor.find_task_descriptor(task_id)
new_task_descriptors = [
job_model.TaskDescriptor({
'task-id': task_id,
'task-attempt': task_attempt
}, td_orig.task_params, td_orig.task_resources)
]
# Update the logging path.
_resolve_task_resources(job_descriptor.job_metadata,
job_descriptor.job_resources, new_task_descriptors)
provider.submit_job(
job_model.JobDescriptor(
job_descriptor.job_metadata, job_descriptor.job_params,
job_descriptor.job_resources, new_task_descriptors), False) |
def show_files(md5):
'''Renders template with `view` of the md5.'''
if not WORKBENCH:
return flask.redirect('/')
md5_view = WORKBENCH.work_request('view', md5)
return flask.render_template('templates/md5_view.html', md5_view=md5_view['view'], md5=md5) | Renders template with `view` of the md5. | Below is the instruction that describes the task:
### Input:
Renders template with `view` of the md5.
### Response:
def show_files(md5):
'''Renders template with `view` of the md5.'''
if not WORKBENCH:
return flask.redirect('/')
md5_view = WORKBENCH.work_request('view', md5)
return flask.render_template('templates/md5_view.html', md5_view=md5_view['view'], md5=md5) |
def verify(token, public_key, validate_nonce=None, algorithms=[DEFAULT_ALGORITHM]):
"""
Verify the validity of the given JWT using the given public key.
:param token: JWT claim
:param public_key: Public key to use when verifying the claim's signature.
:param validate_nonce: Callable to use to validate the claim's nonce.
:param algorithms: Allowable signing algorithms. Defaults to ['RS512'].
:return: False if the token is determined to be invalid or a dictionary of the token data if it is valid.
"""
try:
token_data = jwt.decode(token, public_key, algorithms=algorithms)
except jwt.InvalidTokenError:
logger.debug('JWT failed verification')
return False
claimed_username = token_data.get('username')
claimed_time = token_data.get('time', 0)
claimed_nonce = token_data.get('nonce')
# Ensure time is within acceptable bounds
current_time = time.time()
min_time, max_time = (current_time - TIMESTAMP_TOLERANCE, current_time + TIMESTAMP_TOLERANCE)
if claimed_time < min_time or claimed_time > max_time:
logger.debug('Claimed time is outside of allowable tolerances')
return False
# Ensure nonce is unique
if validate_nonce:
if not validate_nonce(claimed_username, claimed_time, claimed_nonce):
logger.debug('Claimed nonce failed to validate')
return False
else:
logger.warning('validate_nonce function was not supplied!')
# If we've gotten this far, the token is valid
return token_data | Verify the validity of the given JWT using the given public key.
:param token: JWT claim
:param public_key: Public key to use when verifying the claim's signature.
:param validate_nonce: Callable to use to validate the claim's nonce.
:param algorithms: Allowable signing algorithms. Defaults to ['RS512'].
:return: False if the token is determined to be invalid or a dictionary of the token data if it is valid. | Below is the instruction that describes the task:
### Input:
Verify the validity of the given JWT using the given public key.
:param token: JWT claim
:param public_key: Public key to use when verifying the claim's signature.
:param validate_nonce: Callable to use to validate the claim's nonce.
:param algorithms: Allowable signing algorithms. Defaults to ['RS512'].
:return: False if the token is determined to be invalid or a dictionary of the token data if it is valid.
### Response:
def verify(token, public_key, validate_nonce=None, algorithms=[DEFAULT_ALGORITHM]):
"""
Verify the validity of the given JWT using the given public key.
:param token: JWT claim
:param public_key: Public key to use when verifying the claim's signature.
:param validate_nonce: Callable to use to validate the claim's nonce.
:param algorithms: Allowable signing algorithms. Defaults to ['RS512'].
:return: False if the token is determined to be invalid or a dictionary of the token data if it is valid.
"""
try:
token_data = jwt.decode(token, public_key, algorithms=algorithms)
except jwt.InvalidTokenError:
logger.debug('JWT failed verification')
return False
claimed_username = token_data.get('username')
claimed_time = token_data.get('time', 0)
claimed_nonce = token_data.get('nonce')
# Ensure time is within acceptable bounds
current_time = time.time()
min_time, max_time = (current_time - TIMESTAMP_TOLERANCE, current_time + TIMESTAMP_TOLERANCE)
if claimed_time < min_time or claimed_time > max_time:
logger.debug('Claimed time is outside of allowable tolerances')
return False
# Ensure nonce is unique
if validate_nonce:
if not validate_nonce(claimed_username, claimed_time, claimed_nonce):
logger.debug('Claimed nonce failed to validate')
return False
else:
logger.warning('validate_nonce function was not supplied!')
# If we've gotten this far, the token is valid
return token_data |
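A round-trip sketch with PyJWT, using a shared HS256 secret instead of the default RS512 key pair; `TIMESTAMP_TOLERANCE` is a module global the function expects, so an assumed value is supplied here, and the nonce check simply always passes:
import time
import jwt  # PyJWT

TIMESTAMP_TOLERANCE = 300  # seconds; assumed value for the expected global

secret = 's3cret'
token = jwt.encode({'username': 'alice', 'time': time.time(), 'nonce': 'abc123'},
                   secret, algorithm='HS256')
data = verify(token, secret,
              validate_nonce=lambda user, ts, nonce: True,
              algorithms=['HS256'])
print(data['username'])  # alice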
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
"""
# neg_samples = torch.empty(0).long()
n_sample = self.n_sample
n_tries = 2 * n_sample
with torch.no_grad():
neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
device = labels.device
neg_samples = neg_samples.to(device)
true_log_probs = self.log_q[labels].to(device)
samp_log_probs = self.log_q[neg_samples].to(device)
return true_log_probs, samp_log_probs, neg_samples | labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample] | Below is the instruction that describes the task:
### Input:
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
### Response:
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
"""
# neg_samples = torch.empty(0).long()
n_sample = self.n_sample
n_tries = 2 * n_sample
with torch.no_grad():
neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
device = labels.device
neg_samples = neg_samples.to(device)
true_log_probs = self.log_q[labels].to(device)
samp_log_probs = self.log_q[neg_samples].to(device)
return true_log_probs, samp_log_probs, neg_samples |
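For intuition, a toy setup of the state the sampler assumes (`dist`, `log_q` and `n_sample` would normally be attributes built elsewhere):
import torch

vocab, n_sample = 10, 4
dist = torch.ones(vocab) / vocab          # uniform proposal distribution
log_q = dist.log()
labels = torch.tensor([[1, 2], [7, 9]])   # shape [b1, b2]
neg = torch.multinomial(dist, 2 * n_sample, replacement=True).unique()
print(log_q[labels].shape, log_q[neg].shape)  # torch.Size([2, 2]) torch.Size([k]) with k = len(neg)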
def _remove_observer(self, signal, observer):
"""Remove an observer to a valid signal.
Parameters
----------
signal : str
a valid signal.
observer : @func
an observation function to be removed.
"""
if observer in self._observers[signal]:
self._observers[signal].remove(observer) | Remove an observer from a valid signal.
Parameters
----------
signal : str
a valid signal.
observer : @func
an observation function to be removed. | Below is the instruction that describes the task:
### Input:
Remove an observer from a valid signal.
Parameters
----------
signal : str
a valid signal.
observer : @func
an observation function to be removed.
### Response:
def _remove_observer(self, signal, observer):
"""Remove an observer to a valid signal.
Parameters
----------
signal : str
a valid signal.
observer : @func
an observation function to be removed.
"""
if observer in self._observers[signal]:
self._observers[signal].remove(observer) |
def progress(rest):
"Display the progress of something: start|end|percent"
if rest:
left, right, amount = [piece.strip() for piece in rest.split('|')]
ticks = min(int(round(float(amount) / 10)), 10)
bar = "=" * ticks
return "%s [%-10s] %s" % (left, bar, right) | Display the progress of something: start|end|percent | Below is the the instruction that describes the task:
### Input:
Display the progress of something: start|end|percent
### Response:
def progress(rest):
"Display the progress of something: start|end|percent"
if rest:
left, right, amount = [piece.strip() for piece in rest.split('|')]
ticks = min(int(round(float(amount) / 10)), 10)
bar = "=" * ticks
return "%s [%-10s] %s" % (left, bar, right) |
def _addTags(self, **k):
"""Helper for :meth:`addTags`."""
for tag, thing in k.items():
if not isinstance(thing, (tuple, list)):
thing = (thing,)
typetag = defaulttagtype[tag]
self.rawtagdict[tag] = encode(typetag, *thing)
return self | Helper for :meth:`addTags`. | Below is the instruction that describes the task:
### Input:
Helper for :meth:`addTags`.
### Response:
def _addTags(self, **k):
"""Helper for :meth:`addTags`."""
for tag, thing in k.items():
if not isinstance(thing, (tuple, list)):
thing = (thing,)
typetag = defaulttagtype[tag]
self.rawtagdict[tag] = encode(typetag, *thing)
return self |
def get_grid_spatial_dimensions(self, variable):
"""Returns (width, height) for the given variable"""
data = self.open_dataset(self.service).variables[variable.variable]
dimensions = list(data.dimensions)
return data.shape[dimensions.index(variable.x_dimension)], data.shape[dimensions.index(variable.y_dimension)] | Returns (width, height) for the given variable | Below is the instruction that describes the task:
### Input:
Returns (width, height) for the given variable
### Response:
def get_grid_spatial_dimensions(self, variable):
"""Returns (width, height) for the given variable"""
data = self.open_dataset(self.service).variables[variable.variable]
dimensions = list(data.dimensions)
return data.shape[dimensions.index(variable.x_dimension)], data.shape[dimensions.index(variable.y_dimension)] |
def clamp(n, lower, upper):
"""
Restricts the given number to a lower and upper bound (inclusive)
:param n: input number
:param lower: lower bound (inclusive)
:param upper: upper bound (inclusive)
:return: clamped number
"""
if lower > upper:
lower, upper = upper, lower
return max(min(upper, n), lower) | Restricts the given number to a lower and upper bound (inclusive)
:param n: input number
:param lower: lower bound (inclusive)
:param upper: upper bound (inclusive)
:return: clamped number | Below is the instruction that describes the task:
### Input:
Restricts the given number to a lower and upper bound (inclusive)
:param n: input number
:param lower: lower bound (inclusive)
:param upper: upper bound (inclusive)
:return: clamped number
### Response:
def clamp(n, lower, upper):
"""
Restricts the given number to a lower and upper bound (inclusive)
:param n: input number
:param lower: lower bound (inclusive)
:param upper: upper bound (inclusive)
:return: clamped number
"""
if lower > upper:
lower, upper = upper, lower
return max(min(upper, n), lower) |
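A few checks showing the behaviour, including the branch that swaps reversed bounds:
print(clamp(15, 0, 10))  # 10 (capped at the upper bound)
print(clamp(-3, 0, 10))  # 0  (raised to the lower bound)
print(clamp(5, 10, 0))   # 5  (bounds given in reverse order are swapped first)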
def check_next(self, tag):
"""
If next tag is link with same href, combine them.
"""
if (type(tag.next_sibling) == element.Tag and
tag.next_sibling.name == 'a'):
next_tag = tag.next_sibling
if tag.get('href') and next_tag.get('href'):
href = self._parse_href(tag.get('href'))
next_href = self._parse_href(next_tag.get('href'))
if href == next_href:
next_text = next_tag.get_text()
tag.append(next_text)
self.tags_blacklist.append(next_tag) | If next tag is link with same href, combine them. | Below is the instruction that describes the task:
### Input:
If next tag is link with same href, combine them.
### Response:
def check_next(self, tag):
"""
If next tag is link with same href, combine them.
"""
if (type(tag.next_sibling) == element.Tag and
tag.next_sibling.name == 'a'):
next_tag = tag.next_sibling
if tag.get('href') and next_tag.get('href'):
href = self._parse_href(tag.get('href'))
next_href = self._parse_href(next_tag.get('href'))
if href == next_href:
next_text = next_tag.get_text()
tag.append(next_text)
self.tags_blacklist.append(next_tag) |
def get_snapshot(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
'''
Get detailed information about a snapshot.
:param str name: The name of the snapshot given during snapshot creation.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the snapshot.
:return: A dictionary containing information about the snapshot.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_snapshot name="test-repo"
'''
_validate_config(config_path)
sources = list()
cmd = ['snapshot', 'show', '-config={}'.format(config_path),
'-with-packages={}'.format(str(with_packages).lower()),
name]
cmd_ret = _cmd_run(cmd)
ret = _parse_show_output(cmd_ret=cmd_ret)
if ret:
log.debug('Found snapshot: %s', name)
else:
log.debug('Unable to find snapshot: %s', name)
return ret | Get detailed information about a snapshot.
:param str name: The name of the snapshot given during snapshot creation.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the snapshot.
:return: A dictionary containing information about the snapshot.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_snapshot name="test-repo" | Below is the instruction that describes the task:
### Input:
Get detailed information about a snapshot.
:param str name: The name of the snapshot given during snapshot creation.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the snapshot.
:return: A dictionary containing information about the snapshot.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_snapshot name="test-repo"
### Response:
def get_snapshot(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
'''
Get detailed information about a snapshot.
:param str name: The name of the snapshot given during snapshot creation.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the snapshot.
:return: A dictionary containing information about the snapshot.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_snapshot name="test-repo"
'''
_validate_config(config_path)
sources = list()
cmd = ['snapshot', 'show', '-config={}'.format(config_path),
'-with-packages={}'.format(str(with_packages).lower()),
name]
cmd_ret = _cmd_run(cmd)
ret = _parse_show_output(cmd_ret=cmd_ret)
if ret:
log.debug('Found snapshot: %s', name)
else:
log.debug('Unable to find snapshot: %s', name)
return ret |
def ParseFromUnicode(self, value):
"""Parse a string into a client URN.
Convert case so that all URNs are of the form C.[0-9a-f].
Args:
value: string value to parse
"""
precondition.AssertType(value, Text)
value = value.strip()
super(ClientURN, self).ParseFromUnicode(value)
match = self.CLIENT_ID_RE.match(self._string_urn)
if not match:
raise type_info.TypeValueError("Client urn malformed: %s" % value)
clientid = match.group("clientid")
clientid_correctcase = "".join((clientid[0].upper(), clientid[1:].lower()))
self._string_urn = self._string_urn.replace(clientid, clientid_correctcase,
1) | Parse a string into a client URN.
Convert case so that all URNs are of the form C.[0-9a-f].
Args:
value: string value to parse | Below is the instruction that describes the task:
### Input:
Parse a string into a client URN.
Convert case so that all URNs are of the form C.[0-9a-f].
Args:
value: string value to parse
### Response:
def ParseFromUnicode(self, value):
"""Parse a string into a client URN.
Convert case so that all URNs are of the form C.[0-9a-f].
Args:
value: string value to parse
"""
precondition.AssertType(value, Text)
value = value.strip()
super(ClientURN, self).ParseFromUnicode(value)
match = self.CLIENT_ID_RE.match(self._string_urn)
if not match:
raise type_info.TypeValueError("Client urn malformed: %s" % value)
clientid = match.group("clientid")
clientid_correctcase = "".join((clientid[0].upper(), clientid[1:].lower()))
self._string_urn = self._string_urn.replace(clientid, clientid_correctcase,
1) |
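The case-normalisation step can be shown standalone; the regex below is only a guess at CLIENT_ID_RE, which the snippet does not define:
import re

CLIENT_ID_RE = re.compile(r'^(?:aff4:/+)?(?P<clientid>[Cc]\.[0-9a-fA-F]{16})')  # hypothetical

def normalize_client_urn(value):
    match = CLIENT_ID_RE.match(value.strip())
    if not match:
        raise ValueError('Client urn malformed: %s' % value)
    cid = match.group('clientid')
    fixed = cid[0].upper() + cid[1:].lower()  # C.<hex> with lower-case digits
    return value.replace(cid, fixed, 1)

print(normalize_client_urn('aff4:/c.0123456789ABCDEF'))  # aff4:/C.0123456789abcdef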
def merge_figures(figures):
"""
Generates a single Figure from a list of figures
Parameters:
-----------
figures : list(Figures)
List of figures to be merged.
"""
figure={}
data=[]
for fig in figures:
for trace in fig['data']:
data.append(trace)
layout=get_base_layout(figures)
figure['data']=data
figure['layout']=layout
return figure | Generates a single Figure from a list of figures
Parameters:
-----------
figures : list(Figures)
List of figures to be merged. | Below is the instruction that describes the task:
### Input:
Generates a single Figure from a list of figures
Parameters:
-----------
figures : list(Figures)
List of figures to be merged.
### Response:
def merge_figures(figures):
"""
Generates a single Figure from a list of figures
Parameters:
-----------
figures : list(Figures)
List of figures to be merged.
"""
figure={}
data=[]
for fig in figures:
for trace in fig['data']:
data.append(trace)
layout=get_base_layout(figures)
figure['data']=data
figure['layout']=layout
return figure |
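The merge itself is plain dict/list manipulation, so it can be sketched with two hand-built plotly-style figures; `get_base_layout` is approximated here by taking the first figure's layout:
fig_a = {'data': [{'y': [1, 2, 3]}], 'layout': {'title': 'a'}}
fig_b = {'data': [{'y': [3, 2, 1]}], 'layout': {'title': 'b'}}
merged = {'data': fig_a['data'] + fig_b['data'],
          'layout': fig_a['layout']}  # stand-in for get_base_layout(figures)
print(len(merged['data']))  # 2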
def _single_multichannel_mp_run(X, Phi, bound, selection_rule, stop_crit,
max_iter, verbose=False, pad=0,
random_state=None, memory=Memory(None)):
""" run of the structured variant of the RSSMP algorithm """
rng = check_random_state(random_state)
# padding via hstack
pad = int(pad)
n_channels = X.shape[0]
X = np.hstack((np.zeros((n_channels, pad)), X,
np.zeros((n_channels, pad))))
n_samples = X.shape[1]
n_projs = Phi.doth(X).shape[1]
err_mse = {}
# Initialisation
residual = np.hstack((X.copy(), np.zeros((n_channels,
max(Phi.sizes) // 2))))
s_rep = np.zeros((n_channels, n_projs))
X_est = np.zeros((n_channels, n_samples))
# Main algorithm
coeffs = np.zeros((n_channels, n_projs))
it_number = 0
current_lambda = 1
for c_idx in range(n_channels):
err_mse[c_idx] = []
err_mse[c_idx].append(linalg.norm(residual[c_idx, :]))
# Decomposition loop: stopping criterion is either SNR or iteration number
while (current_lambda > bound) & (it_number < max_iter):
# pick a shift at random : in each size
rndshifts = {}
for c_idx in range(n_channels):
rndshifts[c_idx] = []
for s_idx, L in enumerate(Phi.sizes):
shift = rng.randint(low=0, high=L // 4)
for c_idx in range(n_channels):
coeffs[c_idx, s_idx * n_samples:(s_idx + 1) * n_samples] = \
mdct(residual[c_idx, shift:shift + n_samples], L).ravel()
rndshifts[c_idx].append(shift)
# Multichannel mode : we combine projections
combined = selection_rule(coeffs ** 2)
# Select a new element
idx = np.argmax(np.abs(combined))
# find scale and frequency bin of selected atom
s_idx = idx // n_samples
L = Phi.sizes[s_idx]
F = n_samples // (L // 2)
frame = (idx - (s_idx * n_samples)) % F
freq_bin = ((idx - (s_idx * n_samples))) // F
mdct_wf = memory.cache(mdct_waveform)
# Update coefficients and residual
current_lambda_array = np.zeros(n_channels)
for c_idx in range(n_channels):
s_rep[c_idx, idx] += coeffs[c_idx, idx]
# Only one method now : local update via a cached waveform
pos = (frame * L // 2) - L // 4 + rndshifts[c_idx][s_idx]
residual[c_idx, pos:pos + L] -= coeffs[c_idx, idx] * \
mdct_wf(L, freq_bin)
# also add it to the reconstruction
X_est[c_idx, pos:pos + L] += coeffs[c_idx, idx] * \
mdct_wf(L, freq_bin)
# error computation (err_mse)
err_mse[c_idx].append(linalg.norm(residual[c_idx, :]))
current_lambda_array[c_idx] = np.sqrt(
1. - err_mse[c_idx][-1] / err_mse[c_idx][-2])
current_lambda = stop_crit(current_lambda_array)
if verbose:
print("Iteration %d : Current lambda of %1.4f" % (
it_number, current_lambda))
it_number += 1
return X_est[:, pad: X_est.shape[1] - pad], err_mse | run of the structured variant of the RSSMP algorithm | Below is the instruction that describes the task:
### Input:
run of the structured variant of the RSSMP algorithm
### Response:
def _single_multichannel_mp_run(X, Phi, bound, selection_rule, stop_crit,
max_iter, verbose=False, pad=0,
random_state=None, memory=Memory(None)):
""" run of the structured variant of the RSSMP algorithm """
rng = check_random_state(random_state)
# padding via hstack
pad = int(pad)
n_channels = X.shape[0]
X = np.hstack((np.zeros((n_channels, pad)), X,
np.zeros((n_channels, pad))))
n_samples = X.shape[1]
n_projs = Phi.doth(X).shape[1]
err_mse = {}
# Initialisation
residual = np.hstack((X.copy(), np.zeros((n_channels,
max(Phi.sizes) // 2))))
s_rep = np.zeros((n_channels, n_projs))
X_est = np.zeros((n_channels, n_samples))
# Main algorithm
coeffs = np.zeros((n_channels, n_projs))
it_number = 0
current_lambda = 1
for c_idx in range(n_channels):
err_mse[c_idx] = []
err_mse[c_idx].append(linalg.norm(residual[c_idx, :]))
# Decomposition loop: stopping criterion is either SNR or iteration number
while (current_lambda > bound) & (it_number < max_iter):
# pick a shift at random : in each size
rndshifts = {}
for c_idx in range(n_channels):
rndshifts[c_idx] = []
for s_idx, L in enumerate(Phi.sizes):
shift = rng.randint(low=0, high=L // 4)
for c_idx in range(n_channels):
coeffs[c_idx, s_idx * n_samples:(s_idx + 1) * n_samples] = \
mdct(residual[c_idx, shift:shift + n_samples], L).ravel()
rndshifts[c_idx].append(shift)
# Multichannel mode : we combine projections
combined = selection_rule(coeffs ** 2)
# Select a new element
idx = np.argmax(np.abs(combined))
# find scale and frequency bin of selected atom
s_idx = idx // n_samples
L = Phi.sizes[s_idx]
F = n_samples // (L // 2)
frame = (idx - (s_idx * n_samples)) % F
freq_bin = ((idx - (s_idx * n_samples))) // F
mdct_wf = memory.cache(mdct_waveform)
# Update coefficients and residual
current_lambda_array = np.zeros(n_channels)
for c_idx in range(n_channels):
s_rep[c_idx, idx] += coeffs[c_idx, idx]
# Only one method now : local update via a cached waveform
pos = (frame * L // 2) - L // 4 + rndshifts[c_idx][s_idx]
residual[c_idx, pos:pos + L] -= coeffs[c_idx, idx] * \
mdct_wf(L, freq_bin)
# also add it to the reconstruction
X_est[c_idx, pos:pos + L] += coeffs[c_idx, idx] * \
mdct_wf(L, freq_bin)
# error computation (err_mse)
err_mse[c_idx].append(linalg.norm(residual[c_idx, :]))
current_lambda_array[c_idx] = np.sqrt(
1. - err_mse[c_idx][-1] / err_mse[c_idx][-2])
current_lambda = stop_crit(current_lambda_array)
if verbose:
print("Iteration %d : Current lambda of %1.4f" % (
it_number, current_lambda))
it_number += 1
return X_est[:, pad: X_est.shape[1] - pad], err_mse
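The stopping statistic in the loop comes from two consecutive residual norms, current_lambda = sqrt(1 - ||r_k|| / ||r_{k-1}||); a toy check:
import numpy as np

prev_norm, cur_norm = 10.0, 8.0  # err_mse[c_idx][-2], err_mse[c_idx][-1]
lam = np.sqrt(1.0 - cur_norm / prev_norm)
print(round(float(lam), 4))      # 0.4472 -- iteration continues while lam > bound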
def update_query(url, params, remove=None):
"""Updates a URL's query parameters.
Replaces any current values if they are already present in the URL.
Args:
url (str): The URL to update.
params (Mapping[str, str]): A mapping of query parameter
keys to values.
remove (Sequence[str]): Parameters to remove from the query string.
Returns:
str: The URL with updated query parameters.
Examples:
>>> url = 'http://example.com?a=1'
>>> update_query(url, {'a': '2'})
http://example.com?a=2
>>> update_query(url, {'b': '3'})
http://example.com?a=1&b=3
>>> update_query(url, {'b': '3'}, remove=['a'])
http://example.com?b=3
"""
if remove is None:
remove = []
# Split the URL into parts.
parts = urllib.parse.urlparse(url)
# Parse the query string.
query_params = urllib.parse.parse_qs(parts.query)
# Update the query parameters with the new parameters.
query_params.update(params)
# Remove any values specified in remove.
query_params = {
key: value for key, value
in six.iteritems(query_params)
if key not in remove}
# Re-encode the query string.
new_query = urllib.parse.urlencode(query_params, doseq=True)
# Unsplit the url.
new_parts = parts._replace(query=new_query)
return urllib.parse.urlunparse(new_parts) | Updates a URL's query parameters.
Replaces any current values if they are already present in the URL.
Args:
url (str): The URL to update.
params (Mapping[str, str]): A mapping of query parameter
keys to values.
remove (Sequence[str]): Parameters to remove from the query string.
Returns:
str: The URL with updated query parameters.
Examples:
>>> url = 'http://example.com?a=1'
>>> update_query(url, {'a': '2'})
http://example.com?a=2
>>> update_query(url, {'b': '3'})
http://example.com?a=1&b=3
>>> update_query(url, {'b': '3'}, remove=['a'])
http://example.com?b=3 | Below is the instruction that describes the task:
### Input:
Updates a URL's query parameters.
Replaces any current values if they are already present in the URL.
Args:
url (str): The URL to update.
params (Mapping[str, str]): A mapping of query parameter
keys to values.
remove (Sequence[str]): Parameters to remove from the query string.
Returns:
str: The URL with updated query parameters.
Examples:
>>> url = 'http://example.com?a=1'
>>> update_query(url, {'a': '2'})
http://example.com?a=2
>>> update_query(url, {'b': '3'})
http://example.com?a=1&b=3
>>> update_query(url, {'b': '3'}, remove=['a'])
http://example.com?b=3
### Response:
def update_query(url, params, remove=None):
"""Updates a URL's query parameters.
Replaces any current values if they are already present in the URL.
Args:
url (str): The URL to update.
params (Mapping[str, str]): A mapping of query parameter
keys to values.
remove (Sequence[str]): Parameters to remove from the query string.
Returns:
str: The URL with updated query parameters.
Examples:
>>> url = 'http://example.com?a=1'
>>> update_query(url, {'a': '2'})
http://example.com?a=2
>>> update_query(url, {'b': '3'})
http://example.com?a=1&b=3
>>> update_query(url, {'b': '3'}, remove=['a'])
http://example.com?b=3
"""
if remove is None:
remove = []
# Split the URL into parts.
parts = urllib.parse.urlparse(url)
# Parse the query string.
query_params = urllib.parse.parse_qs(parts.query)
# Update the query parameters with the new parameters.
query_params.update(params)
# Remove any values specified in remove.
query_params = {
key: value for key, value
in six.iteritems(query_params)
if key not in remove}
# Re-encode the query string.
new_query = urllib.parse.urlencode(query_params, doseq=True)
# Unsplit the url.
new_parts = parts._replace(query=new_query)
return urllib.parse.urlunparse(new_parts) |
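A quick check of the docstring examples (the function body uses `six.iteritems`, so that package must be importable):
print(update_query('http://example.com?a=1', {'a': '2'}))
# http://example.com?a=2
print(update_query('http://example.com?a=1&b=2', {'b': '3'}, remove=['a']))
# http://example.com?b=3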
def close(self):
"""
close all http connections.
returns a deferred that fires once they're all closed.
"""
def validate_client(client):
"""
Validate that the connection is for the current client
:param client:
:return:
"""
host, port = client.addr
parsed_url = urlparse(self._hostname)
return host == parsed_url.hostname and port == parsed_url.port
# read https://github.com/twisted/treq/issues/86
# to understand the following...
def _check_fds(_):
fds = set(reactor.getReaders() + reactor.getWriters())
if not [fd for fd in fds if isinstance(fd, Client) and validate_client(fd)]:
return
return deferLater(reactor, 0, _check_fds, None)
pool = self._async_http_client_params["pool"]
return pool.closeCachedConnections().addBoth(_check_fds) | close all http connections.
returns a deferred that fires once they're all closed. | Below is the instruction that describes the task:
### Input:
close all http connections.
returns a deferred that fires once they're all closed.
### Response:
def close(self):
"""
close all http connections.
returns a deferred that fires once they're all closed.
"""
def validate_client(client):
"""
Validate that the connection is for the current client
:param client:
:return:
"""
host, port = client.addr
parsed_url = urlparse(self._hostname)
return host == parsed_url.hostname and port == parsed_url.port
# read https://github.com/twisted/treq/issues/86
# to understand the following...
def _check_fds(_):
fds = set(reactor.getReaders() + reactor.getWriters())
if not [fd for fd in fds if isinstance(fd, Client) and validate_client(fd)]:
return
return deferLater(reactor, 0, _check_fds, None)
pool = self._async_http_client_params["pool"]
return pool.closeCachedConnections().addBoth(_check_fds) |
def count_lines_in_file(self, fname=''):
""" you wont believe what this method does """
i = 0
if fname == '':
fname = self.fullname
try:
#with open(fname, encoding="utf8") as f:
with codecs.open(fname, "r",encoding='utf8', errors='ignore') as f:
for i, _ in enumerate(f):
pass
return i + 1
except Exception as ex:
print('can\'t count lines in file in "', fname, '":', str(ex))
return 0 | you won't believe what this method does | Below is the instruction that describes the task:
### Input:
you won't believe what this method does
### Response:
def count_lines_in_file(self, fname=''):
""" you wont believe what this method does """
i = 0
if fname == '':
fname = self.fullname
try:
#with open(fname, encoding="utf8") as f:
with codecs.open(fname, "r",encoding='utf8', errors='ignore') as f:
for i, _ in enumerate(f):
pass
return i + 1
except Exception as ex:
print('can\'t count lines in file in "', fname, '":', str(ex))
return 0 |
def import_class(self, import_name):
"""
import selected class
:param import_name, str, e.g. some.module.MyClass
:return the class
"""
module_name, class_name = import_name.rsplit(".", 1)
mod = import_module(module_name)
check_class = getattr(mod, class_name)
self.mapping[check_class.name] = check_class
logger.info("successfully loaded class %s", check_class)
return check_class | import selected class
:param import_name, str, e.g. some.module.MyClass
:return the class | Below is the instruction that describes the task:
### Input:
import selected class
:param import_name, str, e.g. some.module.MyClass
:return the class
### Response:
def import_class(self, import_name):
"""
import selected class
:param import_name, str, e.g. some.module.MyClass
:return the class
"""
module_name, class_name = import_name.rsplit(".", 1)
mod = import_module(module_name)
check_class = getattr(mod, class_name)
self.mapping[check_class.name] = check_class
logger.info("successfully loaded class %s", check_class)
return check_class |
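The core pattern (rsplit on the last dot, import the module, getattr the class) also works standalone; the `mapping` and logging above are specific to that registry class:
from importlib import import_module

def load_class(dotted_path):
    module_name, class_name = dotted_path.rsplit('.', 1)
    return getattr(import_module(module_name), class_name)

print(load_class('collections.OrderedDict'))  # <class 'collections.OrderedDict'>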
def check_expired_activation(self, activation_key):
"""
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
True if the key has expired, False if still valid.
"""
if SHA1_RE.search(activation_key):
userena = self.get(activation_key=activation_key)
return userena.activation_key_expired()
raise self.model.DoesNotExist | Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
True if the key has expired, False if still valid. | Below is the instruction that describes the task:
### Input:
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
True if the key has expired, False if still valid.
### Response:
def check_expired_activation(self, activation_key):
"""
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
True if the key has expired, False if still valid.
"""
if SHA1_RE.search(activation_key):
userena = self.get(activation_key=activation_key)
return userena.activation_key_expired()
raise self.model.DoesNotExist |
def initStateFrom(self, particleId, particleState, newBest):
"""Init all of our variable positions, velocities, and optionally the best
result and best position from the given particle.
If newBest is true, we get the best result and position for this new
generation from the resultsDB. This is used when evolving a particle
because the bestResult and position as stored was the best AT THE TIME
THAT PARTICLE STARTED TO RUN and does not include the best since that
particle completed.
"""
# Get the update best position and result?
if newBest:
(bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)
else:
bestResult = bestPosition = None
# Replace with the position and velocity of each variable from
# saved state
varStates = particleState['varStates']
for varName in varStates.keys():
varState = copy.deepcopy(varStates[varName])
if newBest:
varState['bestResult'] = bestResult
if bestPosition is not None:
varState['bestPosition'] = bestPosition[varName]
self.permuteVars[varName].setState(varState) | Init all of our variable positions, velocities, and optionally the best
result and best position from the given particle.
If newBest is true, we get the best result and position for this new
generation from the resultsDB. This is used when evolving a particle
because the bestResult and position as stored was the best AT THE TIME
THAT PARTICLE STARTED TO RUN and does not include the best since that
particle completed. | Below is the instruction that describes the task:
### Input:
Init all of our variable positions, velocities, and optionally the best
result and best position from the given particle.
If newBest is true, we get the best result and position for this new
generation from the resultsDB. This is used when evolving a particle
because the bestResult and position as stored was the best AT THE TIME
THAT PARTICLE STARTED TO RUN and does not include the best since that
particle completed.
### Response:
def initStateFrom(self, particleId, particleState, newBest):
"""Init all of our variable positions, velocities, and optionally the best
result and best position from the given particle.
If newBest is true, we get the best result and position for this new
generation from the resultsDB. This is used when evolving a particle
because the bestResult and position as stored was the best AT THE TIME
THAT PARTICLE STARTED TO RUN and does not include the best since that
particle completed.
"""
# Get the update best position and result?
if newBest:
(bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)
else:
bestResult = bestPosition = None
# Replace with the position and velocity of each variable from
# saved state
varStates = particleState['varStates']
for varName in varStates.keys():
varState = copy.deepcopy(varStates[varName])
if newBest:
varState['bestResult'] = bestResult
if bestPosition is not None:
varState['bestPosition'] = bestPosition[varName]
self.permuteVars[varName].setState(varState) |
def decorated_with(func: astroid.FunctionDef, qnames: Iterable[str]) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
try:
if any(
i is not None and i.qname() in qnames for i in decorator_node.infer()
):
return True
except astroid.InferenceError:
continue
return False | Determine if the `func` node has a decorator with a qualified name in `qnames`. | Below is the instruction that describes the task:
### Input:
Determine if the `func` node has a decorator with a qualified name in `qnames`.
### Response:
def decorated_with(func: astroid.FunctionDef, qnames: Iterable[str]) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
try:
if any(
i is not None and i.qname() in qnames for i in decorator_node.infer()
):
return True
except astroid.InferenceError:
continue
return False |
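A small astroid-based check of the helper above (the trailing `#@` marker makes extract_node return the decorated function definition):
import astroid

func = astroid.extract_node('''
import functools

@functools.lru_cache
def cached(): #@
    return 1
''')
print(decorated_with(func, ['functools.lru_cache']))  # True
print(decorated_with(func, ['abc.abstractmethod']))   # False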
def write_config(self):
"""Write the configuration to a local file.
:return: Boolean if successful
"""
json.dump(
self.config,
open(CONFIG_FILE, 'w'),
indent=4,
separators=(',', ': ')
)
return True | Write the configuration to a local file.
:return: Boolean if successful | Below is the instruction that describes the task:
### Input:
Write the configuration to a local file.
:return: Boolean if successful
### Response:
def write_config(self):
"""Write the configuration to a local file.
:return: Boolean if successful
"""
json.dump(
self.config,
open(CONFIG_FILE, 'w'),
indent=4,
separators=(',', ': ')
)
return True |
def address_to_coords(self, address):
"""Convert address to coordinates"""
base_coords = self.BASE_COORDS[self.region]
get_cord = self.COORD_SERVERS[self.region]
url_options = {
"q": address,
"lang": "eng",
"origin": "livemap",
"lat": base_coords["lat"],
"lon": base_coords["lon"]
}
response = requests.get(self.WAZE_URL + get_cord, params=url_options, headers=self.HEADERS)
for response_json in response.json():
if response_json.get('city'):
lat = response_json['location']['lat']
lon = response_json['location']['lon']
bounds = response_json['bounds'] # sometimes the coords don't match up
if bounds is not None:
bounds['top'], bounds['bottom'] = max(bounds['top'], bounds['bottom']), min(bounds['top'], bounds['bottom'])
bounds['left'], bounds['right'] = min(bounds['left'], bounds['right']), max(bounds['left'], bounds['right'])
else:
bounds = {}
return {"lat": lat, "lon": lon, "bounds": bounds}
raise WRCError("Cannot get coords for %s" % address) | Convert address to coordinates | Below is the instruction that describes the task:
### Input:
Convert address to coordinates
### Response:
def address_to_coords(self, address):
"""Convert address to coordinates"""
base_coords = self.BASE_COORDS[self.region]
get_cord = self.COORD_SERVERS[self.region]
url_options = {
"q": address,
"lang": "eng",
"origin": "livemap",
"lat": base_coords["lat"],
"lon": base_coords["lon"]
}
response = requests.get(self.WAZE_URL + get_cord, params=url_options, headers=self.HEADERS)
for response_json in response.json():
if response_json.get('city'):
lat = response_json['location']['lat']
lon = response_json['location']['lon']
bounds = response_json['bounds'] # sometimes the coords don't match up
if bounds is not None:
bounds['top'], bounds['bottom'] = max(bounds['top'], bounds['bottom']), min(bounds['top'], bounds['bottom'])
bounds['left'], bounds['right'] = min(bounds['left'], bounds['right']), max(bounds['left'], bounds['right'])
else:
bounds = {}
return {"lat": lat, "lon": lon, "bounds": bounds}
raise WRCError("Cannot get coords for %s" % address) |
def hist(table, field=-1, class_column=None,
title='', verbosity=2, **kwargs):
"""Plot discrete PDFs
>>> df = pd.DataFrame(pd.np.random.randn(99,3), columns=list('ABC'))
>>> df['Class'] = pd.np.array((pd.np.matrix([1,1,1])*pd.np.matrix(df).T).T > 0)
>>> len(hist(df, verbosity=0, class_column='Class'))
3
"""
field = fuzzy_index_match(table, field)
if not isinstance(table, (pd.DataFrame, basestring)):
try:
table = make_dataframe(table.objects.filter(**{field + '__isnull': False}))
except:
table = table
# labels = get_column_labels(table)
try:
table = table[pd.notnull(table[field])]
except:
pass
series_labels = []
if class_column is not None:
series_labels = sorted(set(table[class_column]))
labels = [str(c) for c in series_labels] + ['all']
default_kwargs = {
'normed': False,
'histtype': 'bar',
'color': seaborn.color_palette(),
'label': labels,
'log': True,
'bins': 10,
}
default_kwargs.update(kwargs)
num_colors = len(default_kwargs['color'])
num_labels = len(default_kwargs['label'])
default_kwargs['color'] = [default_kwargs['color'][i % num_colors] for i in range(num_labels)]
if not title:
title = '{} vs. {}'.format(titlecase(str(field).replace('_', ' ')),
titlecase(str(class_column).replace('_', ' ')))
if verbosity > 0:
print('Plotting histogram titled: {}'.format(title))
if verbosity > 1:
print('histogram configuration: {}'.format(default_kwargs))
x = [table[(table[class_column].isnull() if pd.isnull(c) else table[class_column] == c)]
[field].values for c in series_labels]
x += [table[field].values]
if not default_kwargs['normed']:
default_kwargs['weights'] = [pd.np.ones_like(x_c) / float(len(x_c)) for x_c in x]
elif isinstance(default_kwargs['normed'], int) and default_kwargs['normed'] < 0:
default_kwargs['normed'] = 0
bins = default_kwargs['bins']
# FIXME: x log scaling doesn't work
if False and default_kwargs['log'] and isinstance(bins, int):
max_x = max(pd.np.max(x_c) for x_c in x)
min_x = min(pd.np.min(x_c) for x_c in x)
if pd.isnull(min_x) or not(min_x):
min_x = max_x / 10.
default_kwargs['bins'] = pd.np.logspace(min_x, max_x, bins)
fig, ax = plt.subplots()
ans = plt.hist(x, **default_kwargs)
# FIXME: x log scaling doesn't work
if False and default_kwargs['log'] and isinstance(bins, int):
ax.set_xscale('log')
if verbosity > 1:
plt.legend(default_kwargs['label'])
try:
plt.show(block=False)
except:
plt.show()
plt.title(title)
plt.xlabel(titlecase(field.replace('_', ' ')))
if 'weights' in default_kwargs:
plt.ylabel('Normalized Frequency or Probability')
elif default_kwargs['normed']:
plt.ylabel('Normalized Count')
else:
plt.ylabel('Count')
if verbosity > 2:
plt.savefig(make_timestamp() + '--' + title.replace(' ', '-') + '.png', transparent=True)
return ans | Plot discrete PDFs
>>> df = pd.DataFrame(pd.np.random.randn(99,3), columns=list('ABC'))
>>> df['Class'] = pd.np.array((pd.np.matrix([1,1,1])*pd.np.matrix(df).T).T > 0)
>>> len(hist(df, verbosity=0, class_column='Class'))
3 | Below is the instruction that describes the task:
### Input:
Plot discrete PDFs
>>> df = pd.DataFrame(pd.np.random.randn(99,3), columns=list('ABC'))
>>> df['Class'] = pd.np.array((pd.np.matrix([1,1,1])*pd.np.matrix(df).T).T > 0)
>>> len(hist(df, verbosity=0, class_column='Class'))
3
### Response:
def hist(table, field=-1, class_column=None,
title='', verbosity=2, **kwargs):
"""Plot discrete PDFs
>>> df = pd.DataFrame(pd.np.random.randn(99,3), columns=list('ABC'))
>>> df['Class'] = pd.np.array((pd.np.matrix([1,1,1])*pd.np.matrix(df).T).T > 0)
>>> len(hist(df, verbosity=0, class_column='Class'))
3
"""
field = fuzzy_index_match(table, field)
if not isinstance(table, (pd.DataFrame, basestring)):
try:
table = make_dataframe(table.objects.filter(**{field + '__isnull': False}))
except:
table = table
# labels = get_column_labels(table)
try:
table = table[pd.notnull(table[field])]
except:
pass
series_labels = []
if class_column is not None:
series_labels = sorted(set(table[class_column]))
labels = [str(c) for c in series_labels] + ['all']
default_kwargs = {
'normed': False,
'histtype': 'bar',
'color': seaborn.color_palette(),
'label': labels,
'log': True,
'bins': 10,
}
default_kwargs.update(kwargs)
num_colors = len(default_kwargs['color'])
num_labels = len(default_kwargs['label'])
default_kwargs['color'] = [default_kwargs['color'][i % num_colors] for i in range(num_labels)]
if not title:
title = '{} vs. {}'.format(titlecase(str(field).replace('_', ' ')),
titlecase(str(class_column).replace('_', ' ')))
if verbosity > 0:
print('Plotting histogram titled: {}'.format(title))
if verbosity > 1:
print('histogram configuration: {}'.format(default_kwargs))
x = [table[(table[class_column].isnull() if pd.isnull(c) else table[class_column] == c)]
[field].values for c in series_labels]
x += [table[field].values]
if not default_kwargs['normed']:
default_kwargs['weights'] = [pd.np.ones_like(x_c) / float(len(x_c)) for x_c in x]
elif isinstance(default_kwargs['normed'], int) and default_kwargs['normed'] < 0:
default_kwargs['normed'] = 0
bins = default_kwargs['bins']
# FIXME: x log scaling doesn't work
if False and default_kwargs['log'] and isinstance(bins, int):
max_x = max(pd.np.max(x_c) for x_c in x)
min_x = min(pd.np.min(x_c) for x_c in x)
if pd.isnull(min_x) or not(min_x):
min_x = max_x / 10.
default_kwargs['bins'] = pd.np.logspace(min_x, max_x, bins)
fig, ax = plt.subplots()
ans = plt.hist(x, **default_kwargs)
# FIXME: x log scaling doesn't work
if False and default_kwargs['log'] and isinstance(bins, int):
ax.set_xscale('log')
if verbosity > 1:
plt.legend(default_kwargs['label'])
try:
plt.show(block=False)
except:
plt.show()
plt.title(title)
plt.xlabel(titlecase(field.replace('_', ' ')))
if 'weights' in default_kwargs:
plt.ylabel('Normalized Frequency or Probability')
elif default_kwargs['normed']:
plt.ylabel('Normalized Count')
else:
plt.ylabel('Count')
if verbosity > 2:
plt.savefig(make_timestamp() + '--' + title.replace(' ', '-') + '.png', transparent=True)
return ans |
def implicit_includes (self, feature, target_type):
""" Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
and subvariants referred by <implicit-dependency> properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target.
"""
assert isinstance(feature, basestring)
assert isinstance(target_type, basestring)
if not target_type:
key = feature
else:
key = feature + "-" + target_type
result = self.implicit_includes_cache_.get(key)
if not result:
target_paths = self.all_target_directories(target_type)
target_paths = unique(target_paths)
result = ["<%s>%s" % (feature, p) for p in target_paths]
self.implicit_includes_cache_[key] = result
return result | Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
and subvariants referred by <implicit-dependency> properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target. | Below is the instruction that describes the task:
### Input:
Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
and subvariants referred by <implicit-dependency> properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target.
### Response:
def implicit_includes (self, feature, target_type):
""" Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
and subvariants referred by <implicit-dependency> properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target.
"""
assert isinstance(feature, basestring)
assert isinstance(target_type, basestring)
if not target_type:
key = feature
else:
key = feature + "-" + target_type
result = self.implicit_includes_cache_.get(key)
if not result:
target_paths = self.all_target_directories(target_type)
target_paths = unique(target_paths)
result = ["<%s>%s" % (feature, p) for p in target_paths]
self.implicit_includes_cache_[key] = result
return result |
def get_template_names(self):
"""
Return the page's specified template name, or a fallback if one hasn't been chosen.
"""
posted_name = self.request.POST.get('template_name')
if posted_name:
return [posted_name,]
else:
return super(PagePreviewView, self).get_template_names() | Return the page's specified template name, or a fallback if one hasn't been chosen. | Below is the instruction that describes the task:
### Input:
Return the page's specified template name, or a fallback if one hasn't been chosen.
### Response:
def get_template_names(self):
"""
Return the page's specified template name, or a fallback if one hasn't been chosen.
"""
posted_name = self.request.POST.get('template_name')
if posted_name:
return [posted_name,]
else:
return super(PagePreviewView, self).get_template_names() |
def load(self):
"""
Extract tabular data as |TableData| instances from a JSON file.
|load_source_desc_file|
This method can load the following JSON formats:
**(1)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (1): single table
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (1)
[
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
]
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(2)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (2): single table
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (2)
{
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
}
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(3)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (3): single table
{
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (3)
{
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
}
The example data will be loaded as the following tabular data:
.. table::
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
**(4)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (4): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (4)
{
"table_a" : [
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
],
"table_b" : [
{"a": 1, "b": 4},
{"a": 2 },
{"a": 3, "b": 120.9}
]
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|2| None|
+-+-----+
|3|120.9|
+-+-----+
**(5)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (5): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (5)
{
"table_a" : {
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
},
"table_b" : {
"a": [1, 3],
"b": [4, 120.9]
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|3|120.9|
+-+-----+
**(6)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (6): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (6)
{
"table_a": {
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
},
"table_b": {
"a": 4,
"b": 120.9
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
.. table:: table_b
+---+-----+
|key|value|
+===+=====+
|a | 4.0|
+---+-----+
|b |120.9|
+---+-----+
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s`` | This is replaced with a different value
| for single and multiple JSON tables:
| [single JSON table]
| ``%(format_name)s%(format_id)s``
| [multiple JSON table] Table data key.
``%(format_name)s`` ``"json"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable JSON format.
"""
formatter = JsonTableFormatter(self.load_dict())
formatter.accept(self)
return formatter.to_table_data() | Extract tabular data as |TableData| instances from a JSON file.
|load_source_desc_file|
This method can load six types of JSON formats:
**(1)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (1): single table
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (1)
[
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
]
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(2)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (2): single table
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (2)
{
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
}
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(3)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (3): single table
{
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (3)
{
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
}
The example data will be loaded as the following tabular data:
.. table::
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
**(4)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (4): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (4)
{
"table_a" : [
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
],
"table_b" : [
{"a": 1, "b": 4},
{"a": 2 },
{"a": 3, "b": 120.9}
]
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|2| None|
+-+-----+
|3|120.9|
+-+-----+
**(5)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (5): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (5)
{
"table_a" : {
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
},
"table_b" : {
"a": [1, 3],
"b": [4, 120.9]
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|3|120.9|
+-+-----+
**(6)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (6): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (6)
{
"table_a": {
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
},
"table_b": {
"a": 4,
"b": 120.9
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
.. table:: table_b
+---+-----+
|key|value|
+===+=====+
|a | 4.0|
+---+-----+
|b |120.9|
+---+-----+
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s`` | This is replaced with a different value
| for single and multiple JSON tables:
| [single JSON table]
| ``%(format_name)s%(format_id)s``
| [multiple JSON table] Table data key.
``%(format_name)s`` ``"json"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable JSON format. | Below is the instruction that describes the task:
### Input:
Extract tabular data as |TableData| instances from a JSON file.
|load_source_desc_file|
This method can load six types of JSON formats:
**(1)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (1): single table
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (1)
[
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
]
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(2)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (2): single table
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (2)
{
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
}
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(3)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (3): single table
{
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (3)
{
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
}
The example data will be loaded as the following tabular data:
.. table::
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
**(4)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (4): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (4)
{
"table_a" : [
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
],
"table_b" : [
{"a": 1, "b": 4},
{"a": 2 },
{"a": 3, "b": 120.9}
]
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|2| None|
+-+-----+
|3|120.9|
+-+-----+
**(5)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (5): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (5)
{
"table_a" : {
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
},
"table_b" : {
"a": [1, 3],
"b": [4, 120.9]
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|3|120.9|
+-+-----+
**(6)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (6): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (6)
{
"table_a": {
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
},
"table_b": {
"a": 4,
"b": 120.9
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
.. table:: table_b
+---+-----+
|key|value|
+===+=====+
|a | 4.0|
+---+-----+
|b |120.9|
+---+-----+
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s`` | This is replaced with a different value
| for single and multiple JSON tables:
| [single JSON table]
| ``%(format_name)s%(format_id)s``
| [multiple JSON table] Table data key.
``%(format_name)s`` ``"json"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable JSON format.
### Response:
def load(self):
"""
Extract tabular data as |TableData| instances from a JSON file.
|load_source_desc_file|
This method can load six types of JSON formats:
**(1)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (1): single table
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (1)
[
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
]
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(2)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (2): single table
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (2)
{
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
}
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(3)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (3): single table
{
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (3)
{
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
}
The example data will be loaded as the following tabular data:
.. table::
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
**(4)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (4): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (4)
{
"table_a" : [
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
],
"table_b" : [
{"a": 1, "b": 4},
{"a": 2 },
{"a": 3, "b": 120.9}
]
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|2| None|
+-+-----+
|3|120.9|
+-+-----+
**(5)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (5): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (5)
{
"table_a" : {
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
},
"table_b" : {
"a": [1, 3],
"b": [4, 120.9]
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|3|120.9|
+-+-----+
**(6)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (6): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (6)
{
"table_a": {
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
},
"table_b": {
"a": 4,
"b": 120.9
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
.. table:: table_b
+---+-----+
|key|value|
+===+=====+
|a | 4.0|
+---+-----+
|b |120.9|
+---+-----+
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s`` | This is replaced with a different value
| for single and multiple JSON tables:
| [single JSON table]
| ``%(format_name)s%(format_id)s``
| [multiple JSON table] Table data key.
``%(format_name)s`` ``"json"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable JSON format.
"""
formatter = JsonTableFormatter(self.load_dict())
formatter.accept(self)
return formatter.to_table_data() |
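A hedged usage sketch for the loader above, following pytablereader's documented file-loader pattern; the file name is illustrative and TableData attribute details may vary by version.

```python
# Load every table found in sample.json; load() yields one TableData
# instance per table discovered in the file.
import pytablereader as ptr

loader = ptr.JsonTableFileLoader("sample.json")
for table_data in loader.load():
    print(table_data)
```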
def is_training_name(name):
"""
**Guess** if this variable is only used in training.
Only used internally to avoid too much logging. Do not use it.
"""
# TODO: maybe simply check against TRAINABLE_VARIABLES and MODEL_VARIABLES?
# TODO or use get_slot_names()
name = get_op_tensor_name(name)[0]
if name.endswith('/Adam') or name.endswith('/Adam_1'):
return True
if name.endswith('/Momentum'):
return True
if name.endswith('/Adadelta') or name.endswith('/Adadelta_1'):
return True
if name.endswith('/RMSProp') or name.endswith('/RMSProp_1'):
return True
if name.endswith('/Adagrad'):
return True
if name.startswith('EMA/') or '/EMA/' in name: # all the moving average summaries
return True
if name.startswith('AccumGrad') or name.endswith('/AccumGrad'):
return True
if name.startswith('apply_gradients'):
return True
return False | **Guess** if this variable is only used in training.
Only used internally to avoid too much logging. Do not use it. | Below is the instruction that describes the task:
### Input:
**Guess** if this variable is only used in training.
Only used internally to avoid too much logging. Do not use it.
### Response:
def is_training_name(name):
"""
**Guess** if this variable is only used in training.
Only used internally to avoid too much logging. Do not use it.
"""
# TODO: maybe simply check against TRAINABLE_VARIABLES and MODEL_VARIABLES?
# TODO or use get_slot_names()
name = get_op_tensor_name(name)[0]
if name.endswith('/Adam') or name.endswith('/Adam_1'):
return True
if name.endswith('/Momentum'):
return True
if name.endswith('/Adadelta') or name.endswith('/Adadelta_1'):
return True
if name.endswith('/RMSProp') or name.endswith('/RMSProp_1'):
return True
if name.endswith('/Adagrad'):
return True
if name.startswith('EMA/') or '/EMA/' in name: # all the moving average summaries
return True
if name.startswith('AccumGrad') or name.endswith('/AccumGrad'):
return True
if name.startswith('apply_gradients'):
return True
return False |
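The same suffix/prefix screening can be exercised on its own; the helper below inlines the checks (minus the tensorpack name-normalization call) so it runs without TensorFlow.

```python
def looks_like_training_var(name):
    # optimizer slot variables created by common TF optimizers
    if name.endswith(('/Adam', '/Adam_1', '/Momentum', '/Adadelta',
                      '/Adadelta_1', '/RMSProp', '/RMSProp_1', '/Adagrad')):
        return True
    if name.startswith('EMA/') or '/EMA/' in name:  # moving-average summaries
        return True
    return (name.startswith(('AccumGrad', 'apply_gradients'))
            or name.endswith('/AccumGrad'))

names = ['conv1/W', 'conv1/W/Adam', 'EMA/loss', 'fc/b/Momentum']
print([n for n in names if not looks_like_training_var(n)])  # ['conv1/W']
```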
def resizeToMinimum(self):
"""
Resizes the dock toolbar to the minimum sizes.
"""
offset = self.padding()
min_size = self.minimumPixmapSize()
if self.position() in (XDockToolbar.Position.East,
XDockToolbar.Position.West):
self.resize(min_size.width() + offset, self.height())
elif self.position() in (XDockToolbar.Position.North,
XDockToolbar.Position.South):
self.resize(self.width(), min_size.height() + offset) | Resizes the dock toolbar to the minimum sizes. | Below is the instruction that describes the task:
### Input:
Resizes the dock toolbar to the minimum sizes.
### Response:
def resizeToMinimum(self):
"""
Resizes the dock toolbar to the minimum sizes.
"""
offset = self.padding()
min_size = self.minimumPixmapSize()
if self.position() in (XDockToolbar.Position.East,
XDockToolbar.Position.West):
self.resize(min_size.width() + offset, self.height())
elif self.position() in (XDockToolbar.Position.North,
XDockToolbar.Position.South):
self.resize(self.width(), min_size.height() + offset) |
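The orientation-dependent branching is the interesting part of the method above; here is a standalone sketch with the Qt widget API stubbed out. The position strings are simplified stand-ins for the XDockToolbar.Position enum values.

```python
def minimum_geometry(position, size, min_pixmap, padding):
    # size and min_pixmap are (width, height) tuples
    w, h = size
    min_w, min_h = min_pixmap
    if position in ('east', 'west'):      # vertical dock: shrink the width
        return (min_w + padding, h)
    if position in ('north', 'south'):    # horizontal dock: shrink the height
        return (w, min_h + padding)
    return (w, h)

print(minimum_geometry('west', (400, 600), (32, 32), 8))   # (40, 600)
print(minimum_geometry('north', (400, 600), (32, 32), 8))  # (400, 40)
```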
def doc(self, groups=None, set_location=True, **properties):
"""Add flask route to autodoc for automatic documentation
Any route decorated with this method will be added to the list of
routes to be documented by the generate() or html() methods.
By default, the route is added to the 'all' group.
By specifying group or groups argument, the route can be added to one
or multiple other groups as well, besides the 'all' group.
If set_location is True, the location of the function will be stored.
NOTE: this assumes that the decorator is placed just before the
function (in the normal way).
Custom parameters may also be passed in beyond groups, if they are
named something not already in the dict described in the docstring for
the generate() function, they will be added to the route's properties,
which can be accessed from the template.
If a parameter is passed in with a name that is already in the dict, but
not of a reserved name, the passed parameter overrides that dict value.
"""
def decorator(f):
# Get previous group list (if any)
if f in self.func_groups:
groupset = self.func_groups[f]
else:
groupset = set()
# Set group[s]
if type(groups) is list:
groupset.update(groups)
elif type(groups) is str:
groupset.add(groups)
groupset.add('all')
self.func_groups[f] = groupset
self.func_props[f] = properties
# Set location
if set_location:
caller_frame = inspect.stack()[1]
self.func_locations[f] = {
'filename': caller_frame[1],
'line': caller_frame[2],
}
return f
return decorator | Add flask route to autodoc for automatic documentation
Any route decorated with this method will be added to the list of
routes to be documented by the generate() or html() methods.
By default, the route is added to the 'all' group.
By specifying group or groups argument, the route can be added to one
or multiple other groups as well, besides the 'all' group.
If set_location is True, the location of the function will be stored.
NOTE: this assumes that the decorator is placed just before the
function (in the normal way).
Custom parameters may also be passed in beyond groups, if they are
named something not already in the dict described in the docstring for
the generate() function, they will be added to the route's properties,
which can be accessed from the template.
If a parameter is passed in with a name that is already in the dict, but
not of a reserved name, the passed parameter overrides that dict value. | Below is the instruction that describes the task:
### Input:
Add flask route to autodoc for automatic documentation
Any route decorated with this method will be added to the list of
routes to be documented by the generate() or html() methods.
By default, the route is added to the 'all' group.
By specifying group or groups argument, the route can be added to one
or multiple other groups as well, besides the 'all' group.
If set_location is True, the location of the function will be stored.
NOTE: this assumes that the decorator is placed just before the
function (in the normal way).
Custom parameters may also be passed in beyond groups, if they are
named something not already in the dict described in the docstring for
the generate() function, they will be added to the route's properties,
which can be accessed from the template.
If a parameter is passed in with a name that is already in the dict, but
not of a reserved name, the passed parameter overrides that dict value.
### Response:
def doc(self, groups=None, set_location=True, **properties):
"""Add flask route to autodoc for automatic documentation
Any route decorated with this method will be added to the list of
routes to be documented by the generate() or html() methods.
By default, the route is added to the 'all' group.
By specifying group or groups argument, the route can be added to one
or multiple other groups as well, besides the 'all' group.
If set_location is True, the location of the function will be stored.
NOTE: this assumes that the decorator is placed just before the
function (in the normal way).
Custom parameters may also be passed in beyond groups, if they are
named something not already in the dict described in the docstring for
the generate() function, they will be added to the route's properties,
which can be accessed from the template.
If a parameter is passed in with a name that is already in the dict, but
not of a reserved name, the passed parameter overrides that dict value.
"""
def decorator(f):
# Get previous group list (if any)
if f in self.func_groups:
groupset = self.func_groups[f]
else:
groupset = set()
# Set group[s]
if type(groups) is list:
groupset.update(groups)
elif type(groups) is str:
groupset.add(groups)
groupset.add('all')
self.func_groups[f] = groupset
self.func_props[f] = properties
# Set location
if set_location:
caller_frame = inspect.stack()[1]
self.func_locations[f] = {
'filename': caller_frame[1],
'line': caller_frame[2],
}
return f
return decorator |
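A hedged usage sketch in the Flask-Autodoc style the decorator above comes from; the import path, route, group names, and the extra `rate_limited` property are assumptions for illustration.

```python
from flask import Flask
from flask_autodoc import Autodoc

app = Flask(__name__)
auto = Autodoc(app)

@app.route('/users')
@auto.doc(groups=['public', 'v1'], rate_limited=True)  # extra kwargs become properties
def list_users():
    """Return all users."""
    return 'users'

@app.route('/docs')
def docs():
    return auto.html(groups='public')  # render only the 'public' group
```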
def __generate(results):
"""
Static method which generates the Junit xml string from results
:param results: Results as ResultList object.
:return: Junit xml format string.
"""
doc, tag, text = Doc().tagtext()
# Counters for testsuite tag info
count = 0
fails = 0
errors = 0
skips = 0
for result in results:
# Loop through all results and count the ones that were not later retried.
if result.passed() is False:
if result.retries_left > 0:
# This will appear in the list again, move on
continue
count += 1
if result.passed():
# Passed, no need to increment anything else
continue
elif result.skipped():
skips += 1
elif result.was_inconclusive():
errors += 1
else:
fails += 1
with tag('testsuite',
tests=str(count),
failures=str(fails),
errors=str(errors),
skipped=str(skips)):
for result in results:
if result.passed() is False and result.retries_left > 0:
continue
class_name = result.get_tc_name()
models = result.get_dut_models()
if models:
class_name = class_name + "." + models
name = result.get_toolchain()
with tag('testcase', classname=class_name, name=name,
time=result.get_duration(seconds=True)):
if result.stdout:
with tag('system-out'):
text(result.stdout)
if result.passed():
continue
elif result.skipped():
with tag('skipped'):
text(result.skip_reason)
elif result.was_inconclusive():
with tag('error', message=hex_escape_str(result.fail_reason)):
text(result.stderr)
else:
with tag('failure', message=hex_escape_str(result.fail_reason)):
text(result.stderr)
return indent(
doc.getvalue(),
indentation=' '*4
) | Static method which generates the Junit xml string from results
:param results: Results as ResultList object.
:return: Junit xml format string. | Below is the instruction that describes the task:
### Input:
Static method which generates the Junit xml string from results
:param results: Results as ResultList object.
:return: Junit xml format string.
### Response:
def __generate(results):
"""
Static method which generates the Junit xml string from results
:param results: Results as ResultList object.
:return: Junit xml format string.
"""
doc, tag, text = Doc().tagtext()
# Counters for testsuite tag info
count = 0
fails = 0
errors = 0
skips = 0
for result in results:
# Loop through all results and count the ones that were not later retried.
if result.passed() is False:
if result.retries_left > 0:
# This will appear in the list again, move on
continue
count += 1
if result.passed():
# Passed, no need to increment anything else
continue
elif result.skipped():
skips += 1
elif result.was_inconclusive():
errors += 1
else:
fails += 1
with tag('testsuite',
tests=str(count),
failures=str(fails),
errors=str(errors),
skipped=str(skips)):
for result in results:
if result.passed() is False and result.retries_left > 0:
continue
class_name = result.get_tc_name()
models = result.get_dut_models()
if models:
class_name = class_name + "." + models
name = result.get_toolchain()
with tag('testcase', classname=class_name, name=name,
time=result.get_duration(seconds=True)):
if result.stdout:
with tag('system-out'):
text(result.stdout)
if result.passed():
continue
elif result.skipped():
with tag('skipped'):
text(result.skip_reason)
elif result.was_inconclusive():
with tag('error', message=hex_escape_str(result.fail_reason)):
text(result.stderr)
else:
with tag('failure', message=hex_escape_str(result.fail_reason)):
text(result.stderr)
return indent(
doc.getvalue(),
indentation=' '*4
) |
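A minimal standalone demo of the yattag Doc/tag/text pattern used in the method above, with made-up test-case data.

```python
# Build a tiny JUnit-style document with nested tags, then pretty-print it.
from yattag import Doc, indent

doc, tag, text = Doc().tagtext()
with tag('testsuite', tests='1', failures='1', errors='0', skipped='0'):
    with tag('testcase', classname='LEDTest.K64F', name='GCC_ARM', time='1.25'):
        with tag('failure', message='assertion failed'):
            text('expected 1, got 0')
print(indent(doc.getvalue(), indentation=' ' * 4))
```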
def coroutine(func):
"""Decorator for priming generator-based coroutines.
"""
@wraps(func)
def start(*args, **kwargs):
g = func(*args, **kwargs)
next(g)
return g
return start | Decorator for priming generator-based coroutines. | Below is the instruction that describes the task:
### Input:
Decorator for priming generator-based coroutines.
### Response:
def coroutine(func):
"""Decorator for priming generator-based coroutines.
"""
@wraps(func)
def start(*args, **kwargs):
g = func(*args, **kwargs)
next(g)
return g
return start |
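A usage example for the priming decorator, repeated here with its functools import so the snippet is self-contained: the decorator performs the first next() call, so the returned generator is immediately ready for send().

```python
from functools import wraps

def coroutine(func):
    @wraps(func)
    def start(*args, **kwargs):
        g = func(*args, **kwargs)
        next(g)  # advance to the first yield so send() works right away
        return g
    return start

@coroutine
def running_total():
    total = 0
    while True:
        value = yield total
        total += value

acc = running_total()
print(acc.send(3))  # 3
print(acc.send(4))  # 7
```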
def add_edge(self, start, end, **kwargs):
"""
Add an edge between two nodes.
The nodes will be automatically added if they are not present in the network.
Parameters
----------
start: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
end: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> model = DBN()
>>> model.add_nodes_from(['D', 'I'])
>>> model.add_edge(('D',0), ('I',0))
>>> sorted(model.edges())
[(('D', 0), ('I', 0)), (('D', 1), ('I', 1))]
"""
try:
if len(start) != 2 or len(end) != 2:
raise ValueError('Nodes must be of type (node, time_slice).')
elif not isinstance(start[1], int) or not isinstance(end[1], int):
raise ValueError('Nodes must be of type (node, time_slice).')
elif start[1] == end[1]:
start = (start[0], 0)
end = (end[0], 0)
elif start[1] == end[1] - 1:
start = (start[0], 0)
end = (end[0], 1)
elif start[1] > end[1]:
raise NotImplementedError('Edges in backward direction are not allowed.')
elif start[1] != end[1]:
raise ValueError("Edges over multiple time slices is not currently supported")
except TypeError:
raise ValueError('Nodes must be of type (node, time_slice).')
if start == end:
raise ValueError('Self Loops are not allowed')
elif start in super(DynamicBayesianNetwork, self).nodes() and end \
in super(DynamicBayesianNetwork, self).nodes() and \
nx.has_path(self, end, start):
raise ValueError('Loops are not allowed. Adding the edge from ({start} --> {end}) forms a loop.'.format(
start=str(start), end=str(end)))
super(DynamicBayesianNetwork, self).add_edge(start, end, **kwargs)
if start[1] == end[1]:
super(DynamicBayesianNetwork, self).add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1]))
else:
super(DynamicBayesianNetwork, self).add_node((end[0], 1 - end[1])) | Add an edge between two nodes.
The nodes will be automatically added if they are not present in the network.
Parameters
----------
start: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
end: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> model = DBN()
>>> model.add_nodes_from(['D', 'I'])
>>> model.add_edge(('D',0), ('I',0))
>>> sorted(model.edges())
[(('D', 0), ('I', 0)), (('D', 1), ('I', 1))] | Below is the instruction that describes the task:
### Input:
Add an edge between two nodes.
The nodes will be automatically added if they are not present in the network.
Parameters
----------
start: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
end: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> model = DBN()
>>> model.add_nodes_from(['D', 'I'])
>>> model.add_edge(('D',0), ('I',0))
>>> sorted(model.edges())
[(('D', 0), ('I', 0)), (('D', 1), ('I', 1))]
### Response:
def add_edge(self, start, end, **kwargs):
"""
Add an edge between two nodes.
The nodes will be automatically added if they are not present in the network.
Parameters
----------
start: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
end: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> model = DBN()
>>> model.add_nodes_from(['D', 'I'])
>>> model.add_edge(('D',0), ('I',0))
>>> sorted(model.edges())
[(('D', 0), ('I', 0)), (('D', 1), ('I', 1))]
"""
try:
if len(start) != 2 or len(end) != 2:
raise ValueError('Nodes must be of type (node, time_slice).')
elif not isinstance(start[1], int) or not isinstance(end[1], int):
raise ValueError('Nodes must be of type (node, time_slice).')
elif start[1] == end[1]:
start = (start[0], 0)
end = (end[0], 0)
elif start[1] == end[1] - 1:
start = (start[0], 0)
end = (end[0], 1)
elif start[1] > end[1]:
raise NotImplementedError('Edges in backward direction are not allowed.')
elif start[1] != end[1]:
raise ValueError("Edges over multiple time slices is not currently supported")
except TypeError:
raise ValueError('Nodes must be of type (node, time_slice).')
if start == end:
raise ValueError('Self Loops are not allowed')
elif start in super(DynamicBayesianNetwork, self).nodes() and end \
in super(DynamicBayesianNetwork, self).nodes() and \
nx.has_path(self, end, start):
raise ValueError('Loops are not allowed. Adding the edge from ({start} --> {end}) forms a loop.'.format(
start=str(start), end=str(end)))
super(DynamicBayesianNetwork, self).add_edge(start, end, **kwargs)
if start[1] == end[1]:
super(DynamicBayesianNetwork, self).add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1]))
else:
super(DynamicBayesianNetwork, self).add_node((end[0], 1 - end[1])) |
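A short usage sketch mirroring the doctest in the docstring above (requires pgmpy); note how an intra-slice edge is automatically mirrored into the other time slice.

```python
from pgmpy.models import DynamicBayesianNetwork as DBN

model = DBN()
model.add_edge(('D', 0), ('I', 0))  # intra-slice edge, auto-mirrored to slice 1
model.add_edge(('D', 0), ('D', 1))  # inter-slice (temporal) edge
print(sorted(model.edges()))
```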
def reset(self):
"""
Kills old session and creates a new one with no proxies or headers
"""
# Kill old connection
self.quit()
# Clear chrome configs
self.opts = webdriver.ChromeOptions()
# Create new web driver
self._create_session() | Kills old session and creates a new one with no proxies or headers | Below is the instruction that describes the task:
### Input:
Kills old session and creates a new one with no proxies or headers
### Response:
def reset(self):
"""
Kills old session and creates a new one with no proxies or headers
"""
# Kill old connection
self.quit()
# Clear chrome configs
self.opts = webdriver.ChromeOptions()
# Create new web driver
self._create_session() |
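A hedged sketch of the surrounding class that the method above implies; the wrapper is hypothetical, but it shows the tear-down/rebuild order that drops any configured proxies and headers.

```python
from selenium import webdriver

class Session:
    def __init__(self):
        self.opts = webdriver.ChromeOptions()
        self._create_session()

    def _create_session(self):
        self.driver = webdriver.Chrome(options=self.opts)

    def quit(self):
        self.driver.quit()

    def reset(self):
        self.quit()                             # kill old connection
        self.opts = webdriver.ChromeOptions()   # clear proxies/headers
        self._create_session()                  # fresh driver
```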
def get_feed(self, buckets=None, since=None, results=15, start=0):
"""
Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which feed items to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of news, blogs, reviews, audio or video document dicts;
Example:
>>> c
<catalog - my_artists>
>>> c.get_feed(results=15)
{u'date_found': u'2011-02-06T07:50:25',
u'date_posted': u'2011-02-06T07:50:23',
u'id': u'caec686c0dff361e4c53dceb58fb9d2f',
u'name': u'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL',
u'references': [{u'artist_id': u'ARQUMH41187B9AF699',
u'artist_name': u'Linkin Park'}],
u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ',
u'type': u'blogs',
u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'}
>>>
"""
kwargs = {}
kwargs['bucket'] = buckets or []
if since:
kwargs['since']=since
response = self.get_attribute("feed", results=results, start=start, **kwargs)
rval = ResultList(response['feed'])
return rval | Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which feed items to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of news, blogs, reviews, audio or video document dicts;
Example:
>>> c
<catalog - my_artists>
>>> c.get_feed(results=15)
{u'date_found': u'2011-02-06T07:50:25',
u'date_posted': u'2011-02-06T07:50:23',
u'id': u'caec686c0dff361e4c53dceb58fb9d2f',
u'name': u'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL',
u'references': [{u'artist_id': u'ARQUMH41187B9AF699',
u'artist_name': u'Linkin Park'}],
u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ',
u'type': u'blogs',
u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'}
>>> | Below is the instruction that describes the task:
### Input:
Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which feed items to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of news, blogs, reviews, audio or video document dicts;
Example:
>>> c
<catalog - my_artists>
>>> c.get_feed(results=15)
{u'date_found': u'2011-02-06T07:50:25',
u'date_posted': u'2011-02-06T07:50:23',
u'id': u'caec686c0dff361e4c53dceb58fb9d2f',
u'name': u'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL',
u'references': [{u'artist_id': u'ARQUMH41187B9AF699',
u'artist_name': u'Linkin Park'}],
u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ',
u'type': u'blogs',
u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'}
>>>
### Response:
def get_feed(self, buckets=None, since=None, results=15, start=0):
"""
Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which feed items to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of news, blogs, reviews, audio or video document dicts;
Example:
>>> c
<catalog - my_artists>
>>> c.get_feed(results=15)
{u'date_found': u'2011-02-06T07:50:25',
u'date_posted': u'2011-02-06T07:50:23',
u'id': u'caec686c0dff361e4c53dceb58fb9d2f',
u'name': u'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL',
u'references': [{u'artist_id': u'ARQUMH41187B9AF699',
u'artist_name': u'Linkin Park'}],
u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ',
u'type': u'blogs',
u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'}
>>>
"""
kwargs = {}
kwargs['bucket'] = buckets or []
if since:
kwargs['since']=since
response = self.get_attribute("feed", results=results, start=start, **kwargs)
rval = ResultList(response['feed'])
return rval |
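The request-assembly step is the reusable part of the method above; a standalone illustration of how only caller-supplied parameters reach the API call. (The Echo Nest service this wraps has since been retired, so this is illustrative only.)

```python
def build_feed_query(buckets=None, since=None, results=15, start=0):
    params = {'bucket': buckets or [], 'results': results, 'start': start}
    if since:  # optional filter is only sent when supplied
        params['since'] = since
    return params

print(build_feed_query(buckets=['blogs', 'news'], since='2011-02-01'))
# {'bucket': ['blogs', 'news'], 'results': 15, 'start': 0, 'since': '2011-02-01'}
```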
def new_named_args(self, cur_named_args: Dict[str, Any]) -> Dict[str, Any]:
""" create new named args updating current name args"""
named_args = cur_named_args.copy()
named_args.update(self.matchdict)
return named_args | create new named args updating current name args | Below is the instruction that describes the task:
### Input:
create new named args updating current name args
### Response:
def new_named_args(self, cur_named_args: Dict[str, Any]) -> Dict[str, Any]:
""" create new named args updating current name args"""
named_args = cur_named_args.copy()
named_args.update(self.matchdict)
return named_args |
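A standalone demo of the copy-then-update merge above: the caller's dict is left untouched while route-match values take precedence on key collisions.

```python
def new_named_args(cur_named_args, matchdict):
    named_args = cur_named_args.copy()
    named_args.update(matchdict)  # matchdict wins on key collisions
    return named_args

base = {'lang': 'en', 'page': 1}
print(new_named_args(base, {'page': 2}))  # {'lang': 'en', 'page': 2}
print(base)                               # unchanged: {'lang': 'en', 'page': 1}
```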
def add(self, group_name, session):
'''taobao.crm.group.add Seller creates a group
Creates a new group for the seller; the API returns the id of the newly created group'''
request = TOPRequest('taobao.crm.group.add')
request['group_name'] = group_name
self.create(self.execute(request, session), fields=['is_success', 'group_id'])
return self.is_success, self.group_id | taobao.crm.group.add Seller creates a group
Creates a new group for the seller; the API returns the id of the newly created group | Below is the instruction that describes the task:
### Input:
taobao.crm.group.add Seller creates a group
Creates a new group for the seller; the API returns the id of the newly created group
### Response:
def add(self, group_name, session):
'''taobao.crm.group.add Seller creates a group
Creates a new group for the seller; the API returns the id of the newly created group'''
request = TOPRequest('taobao.crm.group.add')
request['group_name'] = group_name
self.create(self.execute(request, session), fields=['is_success', 'group_id'])
return self.is_success, self.group_id |
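A runnable sketch of the request/response flow above, with the TOP transport faked out; the `TOPRequest` class and `execute` function here are stand-ins for the real client, and the reply values are made up.

```python
class TOPRequest(dict):
    """Stand-in for the Taobao Open Platform request object."""
    def __init__(self, method):
        super().__init__()
        self.method = method

def execute(request, session):
    # stand-in for the signed HTTP call; a real client would POST the
    # request parameters along with the session token
    return {'is_success': True, 'group_id': 42}

request = TOPRequest('taobao.crm.group.add')
request['group_name'] = 'vip_customers'
reply = execute(request, session='fake-session')
print(reply['is_success'], reply['group_id'])  # True 42
```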
def eye(root=None, zodb_uri=None, port=8080):
"""Serves a WSGI app to browse objects based on a root object or ZODB URI.
"""
if root is not None:
root_factory = lambda request: Node(root)
elif zodb_uri is not None:
if '://' not in zodb_uri:
# treat it as a file://
zodb_uri = 'file://' + os.path.abspath(zodb_uri)
from repoze.zodbconn.finder import PersistentApplicationFinder
finder = PersistentApplicationFinder(zodb_uri, appmaker=lambda root: Node(root))
root_factory = lambda request: finder(request.environ)
else:
raise RuntimeError("Must specify root object or ZODB URI.")
app = Eye(root_factory)
if 'DEBUG' in os.environ:
from repoze.debug.pdbpm import PostMortemDebug
app = PostMortemDebug(app)
serve(app, host='127.0.0.1', port=port) | Serves a WSGI app to browse objects based on a root object or ZODB URI. | Below is the instruction that describes the task:
### Input:
Serves a WSGI app to browse objects based on a root object or ZODB URI.
### Response:
def eye(root=None, zodb_uri=None, port=8080):
"""Serves a WSGI app to browse objects based on a root object or ZODB URI.
"""
if root is not None:
root_factory = lambda request: Node(root)
elif zodb_uri is not None:
if '://' not in zodb_uri:
# treat it as a file://
zodb_uri = 'file://' + os.path.abspath(zodb_uri)
from repoze.zodbconn.finder import PersistentApplicationFinder
finder = PersistentApplicationFinder(zodb_uri, appmaker=lambda root: Node(root))
root_factory = lambda request: finder(request.environ)
else:
raise RuntimeError("Must specify root object or ZODB URI.")
app = Eye(root_factory)
if 'DEBUG' in os.environ:
from repoze.debug.pdbpm import PostMortemDebug
app = PostMortemDebug(app)
serve(app, host='127.0.0.1', port=port) |
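The URI-normalization branch is worth isolating: a standalone sketch of how a bare filesystem path is promoted to a file:// URI before the ZODB opener sees it.

```python
import os

def normalize_zodb_uri(zodb_uri):
    if '://' not in zodb_uri:  # treat a bare path as file://
        zodb_uri = 'file://' + os.path.abspath(zodb_uri)
    return zodb_uri

print(normalize_zodb_uri('Data.fs'))               # file:///<cwd>/Data.fs
print(normalize_zodb_uri('zeo://localhost:8100'))  # unchanged
```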
def ready(ctx, quiet):
"""Show all the checks that can be released."""
user_config = ctx.obj
cached_prs = {}
for target in sorted(get_valid_checks()):
# get the name of the current release tag
cur_version = get_version_string(target)
target_tag = get_release_tag_string(target, cur_version)
# get the diff from HEAD
diff_lines = get_commits_since(target, target_tag)
# get the number of PRs that could be potentially released
# Only show the ones that have a changelog label that isn't no-changelog
pr_numbers = parse_pr_numbers(diff_lines)
shippable_prs = 0
for pr_num in pr_numbers:
try:
if pr_num in cached_prs:
changelog_labels = cached_prs[pr_num]
if len(changelog_labels) != 1:
continue
else:
payload = get_pr(pr_num, user_config)
changelog_labels = get_changelog_types(payload)
cached_prs[pr_num] = changelog_labels
if not changelog_labels:
echo_warning(
'PR #{} has no changelog label attached, please add one! Skipping...'.format(pr_num)
)
continue
if len(changelog_labels) > 1:
echo_warning(
'Multiple changelog labels found attached to PR #{}, '
'please only use one! Skipping...'.format(pr_num)
)
continue
if changelog_labels[0] != CHANGELOG_TYPE_NONE:
shippable_prs += 1
except Exception as e:
echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e))
continue
if shippable_prs:
if quiet:
msg = target
else:
msg = 'Check {} has {} out of {} merged PRs that could be released' ''.format(
target, shippable_prs, len(pr_numbers)
)
echo_info(msg) | Show all the checks that can be released. | Below is the instruction that describes the task:
### Input:
Show all the checks that can be released.
### Response:
def ready(ctx, quiet):
"""Show all the checks that can be released."""
user_config = ctx.obj
cached_prs = {}
for target in sorted(get_valid_checks()):
# get the name of the current release tag
cur_version = get_version_string(target)
target_tag = get_release_tag_string(target, cur_version)
# get the diff from HEAD
diff_lines = get_commits_since(target, target_tag)
# get the number of PRs that could be potentially released
# Only show the ones that have a changelog label that isn't no-changelog
pr_numbers = parse_pr_numbers(diff_lines)
shippable_prs = 0
for pr_num in pr_numbers:
try:
if pr_num in cached_prs:
changelog_labels = cached_prs[pr_num]
if len(changelog_labels) != 1:
continue
else:
payload = get_pr(pr_num, user_config)
changelog_labels = get_changelog_types(payload)
cached_prs[pr_num] = changelog_labels
if not changelog_labels:
echo_warning(
'PR #{} has no changelog label attached, please add one! Skipping...'.format(pr_num)
)
continue
if len(changelog_labels) > 1:
echo_warning(
'Multiple changelog labels found attached to PR #{}, '
'please only use one! Skipping...'.format(pr_num)
)
continue
if changelog_labels[0] != CHANGELOG_TYPE_NONE:
shippable_prs += 1
except Exception as e:
echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e))
continue
if shippable_prs:
if quiet:
msg = target
else:
msg = 'Check {} has {} out of {} merged PRs that could be released' ''.format(
target, shippable_prs, len(pr_numbers)
)
echo_info(msg) |
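The cache-and-screen loop at the heart of the command above can be exercised standalone. In this sketch, `fetch_labels` is a stand-in for the GitHub PR lookup and the value of `CHANGELOG_TYPE_NONE` is assumed; a PR counts as shippable only when it carries exactly one changelog label that is not the no-changelog type.

```python
CHANGELOG_TYPE_NONE = 'no-changelog'  # assumed value of the real constant

def count_shippable(pr_numbers, fetch_labels, cache):
    shippable = 0
    for pr_num in pr_numbers:
        if pr_num not in cache:
            cache[pr_num] = fetch_labels(pr_num)  # hit the API once per PR
        labels = cache[pr_num]
        if len(labels) == 1 and labels[0] != CHANGELOG_TYPE_NONE:
            shippable += 1
    return shippable

labels_by_pr = {1: ['added'], 2: ['no-changelog'], 3: [], 4: ['fixed', 'added']}
print(count_shippable([1, 2, 3, 4], labels_by_pr.get, {}))  # 1
```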
def _realGetLoader(n, default=_marker):
"""
Search all themes for a template named C{n}, returning a loader
for it if found. If not found and a default is passed, the default
will be returned. Otherwise C{RuntimeError} will be raised.
This function is deprecated in favor of using a L{ThemedElement}
for your view code, or calling
ITemplateNameResolver(userStore).getDocFactory.
"""
for t in getAllThemes():
fact = t.getDocFactory(n, None)
if fact is not None:
return fact
if default is _marker:
raise RuntimeError("No loader for %r anywhere" % (n,))
return default | Search all themes for a template named C{n}, returning a loader
for it if found. If not found and a default is passed, the default
will be returned. Otherwise C{RuntimeError} will be raised.
This function is deprecated in favor of using a L{ThemedElement}
for your view code, or calling
ITemplateNameResolver(userStore).getDocFactory. | Below is the instruction that describes the task:
### Input:
Search all themes for a template named C{n}, returning a loader
for it if found. If not found and a default is passed, the default
will be returned. Otherwise C{RuntimeError} will be raised.
This function is deprecated in favor of using a L{ThemedElement}
for your view code, or calling
ITemplateNameResolver(userStore).getDocFactory.
### Response:
def _realGetLoader(n, default=_marker):
"""
Search all themes for a template named C{n}, returning a loader
for it if found. If not found and a default is passed, the default
will be returned. Otherwise C{RuntimeError} will be raised.
This function is deprecated in favor of using a L{ThemedElement}
for your view code, or calling
ITemplateNameResolver(userStore).getDocFactory.
"""
for t in getAllThemes():
fact = t.getDocFactory(n, None)
if fact is not None:
return fact
if default is _marker:
raise RuntimeError("No loader for %r anywhere" % (n,))
return default |
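A short usage sketch for _realGetLoader; the template name 'shell' is an illustrative assumption, not one taken from the source:

# Look up a template across all installed themes, supplying an explicit
# default instead of letting RuntimeError propagate on a miss.
loader = _realGetLoader('shell', default=None)
if loader is None:
    raise SystemExit("template 'shell' is not provided by any installed theme")

Calling it without a default keeps the fail-fast behaviour the docstring describes: a template missing from every theme raises RuntimeError.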
async def create_cred(
self,
cred_offer_json,
cred_req_json: str,
cred_attrs: dict,
rr_size: int = None) -> (str, str):
"""
Create credential as Issuer out of credential request and dict of key:value (raw, unencoded)
entries for attributes.
Return credential json, and if cred def supports revocation, credential revocation identifier.
Raise WalletState for closed wallet.
If the credential definition supports revocation, and the current revocation registry is full,
the processing creates a new revocation registry en passant. Depending on the revocation
registry size (by default starting at 64 and doubling iteratively through a maximum of 100000)
and the revocation registry builder posture (see RevRegBuilder.__init__()), this operation may
delay credential creation by several seconds. The use of an external revocation registry builder
runs a parallel process, skirting this delay, but is more costly at initialization.
:param cred_offer_json: credential offer json as created by Issuer
:param cred_req_json: credential request json as created by HolderProver
:param cred_attrs: dict mapping each attribute to its original value (the operation encodes it); e.g.,
::
{
'favourite_drink': 'martini',
'height': 180,
'last_visit_date': '2017-12-31',
'weaknesses': None
}
:param rr_size: size of new revocation registry (default as per RevRegBuilder.create_rev_reg()) if necessary
:return: tuple with newly issued credential json, credential revocation identifier (if cred def
supports revocation, None otherwise).
"""
LOGGER.debug(
'Issuer.create_cred >>> cred_offer_json: %s, cred_req_json: %s, cred_attrs: %s, rr_size: %s',
cred_offer_json,
cred_req_json,
cred_attrs,
rr_size)
if not self.wallet.handle:
LOGGER.debug('Issuer.create_cred <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
cd_id = json.loads(cred_offer_json)['cred_def_id']
if not ok_cred_def_id(cd_id):
LOGGER.debug('Issuer.create_cred <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
cred_def = json.loads(await self.get_cred_def(cd_id)) # ensure cred def is in cache
if 'revocation' in cred_def['value']:
with REVO_CACHE.lock:
rr_id = Tails.current_rev_reg_id(self.dir_tails, cd_id)
tails = REVO_CACHE[rr_id].tails
assert tails # at (re)start, at cred def, Issuer sync_revoc_for_issue() sets this index in revo cache
try:
(cred_json, cred_revoc_id, _) = await anoncreds.issuer_create_credential( # issue by default to rr
self.wallet.handle,
cred_offer_json,
cred_req_json,
json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
rr_id,
tails.reader_handle)
rv = (cred_json, cred_revoc_id)
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.AnoncredsRevocationRegistryFullError:
(tag, rr_size_suggested) = Tails.next_tag(self.dir_tails, cd_id)
rr_id = rev_reg_id(cd_id, tag)
if self.rrbx:
await self._set_rev_reg(rr_id, rr_size)
else:
await self.rrb.create_rev_reg(rr_id, rr_size or rr_size_suggested)
await self._send_rev_reg_def(rr_id)
REVO_CACHE[rr_id].tails = await Tails(self.dir_tails, cd_id).open() # symlink OK now
return await self.create_cred(cred_offer_json, cred_req_json, cred_attrs)
LOGGER.debug('Issuer.create_cred <!< cannot create cred, indy error code %s', x_indy.error_code)
raise
else:
try:
(cred_json, _, _) = await anoncreds.issuer_create_credential(
self.wallet.handle,
cred_offer_json,
cred_req_json,
json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
None,
None)
rv = (cred_json, None)
except IndyError as x_indy:
LOGGER.debug('Issuer.create_cred <!< cannot create cred, indy error code %s', x_indy.error_code)
raise
LOGGER.debug('Issuer.create_cred <<< %s', rv)
return rv | Create credential as Issuer out of credential request and dict of key:value (raw, unencoded)
entries for attributes.
Return credential json, and if cred def supports revocation, credential revocation identifier.
Raise WalletState for closed wallet.
If the credential definition supports revocation, and the current revocation registry is full,
the processing creates a new revocation registry en passant. Depending on the revocation
registry size (by default starting at 64 and doubling iteratively through a maximum of 100000)
and the revocation registry builder posture (see RevRegBuilder.__init__()), this operation may
delay credential creation by several seconds. The use of an external revocation registry builder
runs a parallel process, skirting this delay, but is more costly at initialization.
:param cred_offer_json: credential offer json as created by Issuer
:param cred_req_json: credential request json as created by HolderProver
:param cred_attrs: dict mapping each attribute to its original value (the operation encodes it); e.g.,
::
{
'favourite_drink': 'martini',
'height': 180,
'last_visit_date': '2017-12-31',
'weaknesses': None
}
:param rr_size: size of new revocation registry (default as per RevRegBuilder.create_rev_reg()) if necessary
:return: tuple with newly issued credential json, credential revocation identifier (if cred def
supports revocation, None otherwise). | Below is the instruction that describes the task:
### Input:
Create credential as Issuer out of credential request and dict of key:value (raw, unencoded)
entries for attributes.
Return credential json, and if cred def supports revocation, credential revocation identifier.
Raise WalletState for closed wallet.
If the credential definition supports revocation, and the current revocation registry is full,
the processing creates a new revocation registry en passant. Depending on the revocation
registry size (by default starting at 64 and doubling iteratively through a maximum of 100000)
and the revocation registry builder posture (see RevRegBuilder.__init__()), this operation may
delay credential creation by several seconds. The use of an external revocation registry builder
runs a parallel process, skirting this delay, but is more costly at initialization.
:param cred_offer_json: credential offer json as created by Issuer
:param cred_req_json: credential request json as created by HolderProver
:param cred_attrs: dict mapping each attribute to its original value (the operation encodes it); e.g.,
::
{
'favourite_drink': 'martini',
'height': 180,
'last_visit_date': '2017-12-31',
'weaknesses': None
}
:param rr_size: size of new revocation registry (default as per RevRegBuilder.create_rev_reg()) if necessary
:return: tuple with newly issued credential json, credential revocation identifier (if cred def
supports revocation, None otherwise).
### Response:
async def create_cred(
self,
cred_offer_json,
cred_req_json: str,
cred_attrs: dict,
rr_size: int = None) -> (str, str):
"""
Create credential as Issuer out of credential request and dict of key:value (raw, unencoded)
entries for attributes.
Return credential json, and if cred def supports revocation, credential revocation identifier.
Raise WalletState for closed wallet.
If the credential definition supports revocation, and the current revocation registry is full,
the processing creates a new revocation registry en passant. Depending on the revocation
registry size (by default starting at 64 and doubling iteratively through a maximum of 100000)
and the revocation registry builder posture (see RevRegBuilder.__init__()), this operation may
delay credential creation by several seconds. The use of an external revocation registry builder
runs a parallel process, skirting this delay, but is more costly at initialization.
:param cred_offer_json: credential offer json as created by Issuer
:param cred_req_json: credential request json as created by HolderProver
:param cred_attrs: dict mapping each attribute to its original value (the operation encodes it); e.g.,
::
{
'favourite_drink': 'martini',
'height': 180,
'last_visit_date': '2017-12-31',
'weaknesses': None
}
:param rr_size: size of new revocation registry (default as per RevRegBuilder.create_rev_reg()) if necessary
:return: tuple with newly issued credential json, credential revocation identifier (if cred def
supports revocation, None otherwise).
"""
LOGGER.debug(
'Issuer.create_cred >>> cred_offer_json: %s, cred_req_json: %s, cred_attrs: %s, rr_size: %s',
cred_offer_json,
cred_req_json,
cred_attrs,
rr_size)
if not self.wallet.handle:
LOGGER.debug('Issuer.create_cred <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
cd_id = json.loads(cred_offer_json)['cred_def_id']
if not ok_cred_def_id(cd_id):
LOGGER.debug('Issuer.create_cred <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
cred_def = json.loads(await self.get_cred_def(cd_id)) # ensure cred def is in cache
if 'revocation' in cred_def['value']:
with REVO_CACHE.lock:
rr_id = Tails.current_rev_reg_id(self.dir_tails, cd_id)
tails = REVO_CACHE[rr_id].tails
assert tails # at (re)start, at cred def, Issuer sync_revoc_for_issue() sets this index in revo cache
try:
(cred_json, cred_revoc_id, _) = await anoncreds.issuer_create_credential( # issue by default to rr
self.wallet.handle,
cred_offer_json,
cred_req_json,
json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
rr_id,
tails.reader_handle)
rv = (cred_json, cred_revoc_id)
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.AnoncredsRevocationRegistryFullError:
(tag, rr_size_suggested) = Tails.next_tag(self.dir_tails, cd_id)
rr_id = rev_reg_id(cd_id, tag)
if self.rrbx:
await self._set_rev_reg(rr_id, rr_size)
else:
await self.rrb.create_rev_reg(rr_id, rr_size or rr_size_suggested)
await self._send_rev_reg_def(rr_id)
REVO_CACHE[rr_id].tails = await Tails(self.dir_tails, cd_id).open() # symlink OK now
return await self.create_cred(cred_offer_json, cred_req_json, cred_attrs)
LOGGER.debug('Issuer.create_cred <!< cannot create cred, indy error code %s', x_indy.error_code)
raise
else:
try:
(cred_json, _, _) = await anoncreds.issuer_create_credential(
self.wallet.handle,
cred_offer_json,
cred_req_json,
json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
None,
None)
rv = (cred_json, None)
except IndyError as x_indy:
LOGGER.debug('Issuer.create_cred <!< cannot create cred, indy error code %s', x_indy.error_code)
raise
LOGGER.debug('Issuer.create_cred <<< %s', rv)
return rv |
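A hedged usage sketch; issuer, offer_json, and req_json stand in for objects produced by other API calls (Issuer setup, the credential offer, and the HolderProver's credential request) that this row does not show:

import json

async def issue_martini_cred(issuer, offer_json, req_json):
    # cred_revoc_id comes back None when the cred def does not support revocation
    (cred_json, cred_revoc_id) = await issuer.create_cred(
        offer_json,
        req_json,
        {
            'favourite_drink': 'martini',
            'height': 180,
            'last_visit_date': '2017-12-31',
            'weaknesses': None
        },
        rr_size=4096)  # optional; only consulted if a fresh rev reg must be created
    return json.loads(cred_json), cred_revoc_id

The rr_size argument only matters when the current revocation registry fills during issue; otherwise the credential is issued against the existing registry.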