code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def recursive_dictionary_get(keys, dictionary):
    """
    Look up a key, or a dot-separated path of keys, in a nested dictionary.

    :param keys: key or dot separated string of keys to look for.
    :param dictionary: Dictionary to search from
    :return: results of search or None
    """
    if "." not in keys or len(keys) <= 1:
        # Single key: plain lookup, guarding against non-dict inputs.
        if dictionary and hasattr(dictionary, "get"):
            return dictionary.get(keys)
        return None
    head, rest = keys.split(".", 1)
    nested = dictionary.get(head)
    # Descend only when the next level is itself dict-like.
    if nested and hasattr(nested, "get"):
        return recursive_dictionary_get(rest, nested)
    return None
|
Gets contents of requirement key recursively so users can search
for specific keys inside nested requirement dicts.
:param keys: key or dot separated string of keys to look for.
:param dictionary: Dictionary to search from
:return: results of search or None
|
def html_temp_launch(html):
    """Write *html* to a temporary file and open it in the default browser.

    :param html: HTML document text to display.
    """
    import os
    # The original hard-coded path assumed <tempdir>/swhlab already
    # existed; create it so open() cannot fail with ENOENT.
    folder = os.path.join(tempfile.gettempdir(), "swhlab")
    os.makedirs(folder, exist_ok=True)
    fname = os.path.join(folder, "temp.html")
    with open(fname, 'w') as f:
        f.write(html)
    webbrowser.open(fname)
|
given text, make it a temporary HTML file and launch it.
|
def init_app(self, app):
    """
    Register this extension with the flask app.

    :param app: A flask application
    """
    # Flask keeps extension instances in app.extensions; create the
    # registry on first use (EAFP instead of hasattr).
    try:
        registry = app.extensions
    except AttributeError:  # pragma: no cover
        registry = app.extensions = {}
    registry['flask-graphql-auth'] = self
    self._set_default__configuration_options(app)
|
Register this extension with the flask app.
:param app: A flask application
|
def _simple_command(self, command, arg=None, **kwargs):
"""Send a simple command."""
self._protocol.send_command(command, arg)
return self._protocol.handle_simple_responses(**kwargs)
|
Send a simple command.
|
def open(self, vendor_id: int = 0x16c0, product_id: int = 0x5dc, bus: int = None, address: int = None) -> bool:
    """
    Open the first device that matches the search criteria. The default
    parameters are set up for the likely most common case of a single
    uDMX interface. However, for the case of multiple uDMX interfaces,
    you can use the bus and address parameters to further specify the
    uDMX interface to be opened.

    :param vendor_id: USB vendor id to match (default 0x16c0).
    :param product_id: USB product id to match (default 0x5dc).
    :param bus: USB bus number 1-n
    :param address: USB device address 1-n
    :return: Returns true if a device was opened. Otherwise, returns false.
    """
    # Build the keyword filter, skipping falsy values so callers can
    # pass 0/None to mean "do not filter on this attribute".
    criteria = [
        ("idVendor", vendor_id),
        ("idProduct", product_id),
        ("bus", bus),
        ("address", address),
    ]
    kwargs = {key: value for key, value in criteria if value}
    # Find the uDMX interface
    self._dev = usb.core.find(**kwargs)
    return self._dev is not None
|
Open the first device that matches the search criteria. The default parameters
are set up for the likely most common case of a single uDMX interface.
However, for the case of multiple uDMX interfaces, you can use the
bus and address parameters to further specify the uDMX interface
to be opened.
:param vendor_id:
:param product_id:
:param bus: USB bus number 1-n
:param address: USB device address 1-n
:return: Returns true if a device was opened. Otherwise, returns false.
|
def remove(self, item):
    """
    Removes the specified element's first occurrence from the list if it exists in this list.

    :param item: (object), the specified element.
    :return: (bool), ``true`` if the specified element is present in this list.
    """
    check_not_none(item, "Value can't be None")
    # Serialize the element before handing it to the protocol codec.
    serialized = self._to_data(item)
    return self._encode_invoke(list_remove_codec, value=serialized)
|
Removes the specified element's first occurrence from the list if it exists in this list.
:param item: (object), the specified element.
:return: (bool), ``true`` if the specified element is present in this list.
|
def close(self):
    """Should be closed[explicit] while using external session or connector,
    instead of close by self.__del__."""
    try:
        session = self.session
        if session.closed:
            return
        # Only tear down the connector when this session owns it.
        if session._connector_owner:
            session._connector.close()
        session._connector = None
    except Exception as err:
        Config.dummy_logger.error("can not close session for: %s" % err)
|
Should be closed[explicit] while using external session or connector,
instead of close by self.__del__.
|
def _xml_element_value(el: Element, int_tags: list):
"""
Gets XML Element value.
:param el: Element
:param int_tags: List of tags that should be treated as ints
:return: value of the element (int/str)
"""
# None
if el.text is None:
return None
# int
try:
if el.tag in int_tags:
return int(el.text)
except:
pass
# default to str if not empty
s = str(el.text).strip()
return s if s else None
|
Gets XML Element value.
:param el: Element
:param int_tags: List of tags that should be treated as ints
:return: value of the element (int/str)
|
def find_bindmounts(self):
    """Yield bind mountpoints (inside mounts) whose path matches :attr:`re_pattern`."""
    for mountpoint, details in self.mountpoints.items():
        _, _, opts = details
        if 'bind' not in opts:
            continue
        if re.match(self.re_pattern, mountpoint):
            yield mountpoint
|
Finds all bind mountpoints that are inside mounts that match the :attr:`re_pattern`
|
def is_data_dependent(fmto, data):
    """Check whether a formatoption is data dependent

    Parameters
    ----------
    fmto: Formatoption
        The :class:`Formatoption` instance to check
    data: xarray.DataArray
        The data array to use if the :attr:`~Formatoption.data_dependent`
        attribute is a callable

    Returns
    -------
    bool
        True, if the formatoption depends on the data"""
    flag = fmto.data_dependent
    # The attribute may be a plain bool or a predicate over the data.
    return flag(data) if callable(flag) else flag
|
Check whether a formatoption is data dependent
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to check
data: xarray.DataArray
The data array to use if the :attr:`~Formatoption.data_dependent`
attribute is a callable
Returns
-------
bool
True, if the formatoption depends on the data
|
def add_tcp_flag(self, tcp_flag):
    """Add a single TCP flag - will be OR'd into the existing bitmask"""
    if tcp_flag not in (1, 2, 4, 8, 16, 32, 64, 128):
        raise ValueError("Invalid TCP flag. Valid: [1, 2, 4, 8, 16,32, 64, 128]")

    def field_size(value):
        # str, key, key quotes, colon
        return len(str(value)) + len('tcp_flags') + 3

    existing = self._json_dict.get('tcp_flags')
    prev_size = 0 if existing is None else field_size(existing)
    if existing is None:
        self._json_dict['tcp_flags'] = 0
    self._json_dict['tcp_flags'] |= tcp_flag
    # update the serialized-size estimate
    self._size += field_size(self._json_dict['tcp_flags']) - prev_size
    if prev_size == 0 and self._has_field:
        # account for the ", " separator in front of the new field
        self._size += 2
    self._has_field = True
|
Add a single TCP flag - will be OR'd into the existing bitmask
|
def load_command_table(self, args):  # pylint: disable=too-many-statements
    """Load all Service Fabric commands.

    :param args: command-line arguments (unused here — presumably required
        by the CLI framework's loader signature; confirm against framework).
    :return: OrderedDict of the registered command table.
    """
    # Need an empty client for the select and upload operations
    with CommandSuperGroup(__name__, self,
                           'rcctl.custom_cluster#{}') as super_group:
        with super_group.group('cluster') as group:
            group.command('select', 'select')
    # Reliable-collection commands get a real client built by client_create.
    with CommandSuperGroup(__name__, self, 'rcctl.custom_reliablecollections#{}',
                           client_factory=client_create) as super_group:
        with super_group.group('dictionary') as group:
            group.command('query', 'query_reliabledictionary')
            group.command('execute', 'execute_reliabledictionary')
            group.command('schema', 'get_reliabledictionary_schema')
            group.command('list', 'get_reliabledictionary_list')
            group.command('type-schema', 'get_reliabledictionary_type_schema')
    # Shared argument definitions (long/short options) for all dictionary commands.
    with ArgumentsContext(self, 'dictionary') as ac:
        ac.argument('application_name', options_list=['--application-name', '-a'])
        ac.argument('service_name', options_list=['--service-name', '-s'])
        ac.argument('dictionary_name', options_list=['--dictionary-name', '-d'])
        ac.argument('output_file', options_list=['--output-file', '-out'])
        ac.argument('input_file', options_list=['--input-file', '-in'])
        ac.argument('query_string', options_list=['--query-string', '-q'])
        ac.argument('type_name', options_list=['--type-name', '-t'])
    return OrderedDict(self.command_table)
|
Load all Service Fabric commands
|
def handle_stranded_tasks(self, engine):
    """Deal with jobs resident in an engine that died.

    Builds a fake error reply for every task still pending on the dead
    engine and dispatches it, then scrubs the engine from the
    completed/failed bookkeeping.

    :param engine: identity of the engine that died.
    """
    lost = self.pending[engine]
    # Snapshot the keys: dispatch_result() below mutates
    # self.pending[engine] (which *is* ``lost``), and mutating a dict
    # while iterating it raises RuntimeError on Python 3.
    for msg_id in list(lost.keys()):
        if msg_id not in self.pending[engine]:
            # prevent double-handling of messages
            continue
        raw_msg = lost[msg_id].raw_msg
        idents, msg = self.session.feed_identities(raw_msg, copy=False)
        parent = self.session.unpack(msg[1].bytes)
        idents = [engine, idents[0]]
        # build fake error reply; raise/catch so wrap_exception sees a
        # live exception with a traceback.
        try:
            raise error.EngineError("Engine %r died while running task %r" % (engine, msg_id))
        except error.EngineError:
            content = error.wrap_exception()
        # build fake header
        header = dict(
            status='error',
            engine=engine,
            date=datetime.now(),
        )
        msg = self.session.msg('apply_reply', content, parent=parent, subheader=header)
        # materialize: on Python 3 ``map`` is a lazy one-shot iterator,
        # but the reply frames must be a reusable sequence.
        raw_reply = list(map(zmq.Message, self.session.serialize(msg, ident=idents)))
        # and dispatch it
        self.dispatch_result(raw_reply)
    # finally scrub completed/failed lists
    self.completed.pop(engine)
    self.failed.pop(engine)
|
Deal with jobs resident in an engine that died.
|
def syncFlags(self):
    """
    Update the cached list of all enabled flags, and store it in the :attr:`flags` attribute.
    """
    resp = self.skype.conn("GET", SkypeConnection.API_FLAGS,
                           auth=SkypeConnection.Auth.SkypeToken)
    self.flags = set(resp.json())
|
Update the cached list of all enabled flags, and store it in the :attr:`flags` attribute.
|
def live_profile(script, argv, profiler_factory, interval, spawn, signum,
                 pickle_protocol, mono):
    """Profile a Python script continuously.

    Forks: the parent runs the interactive viewer UI while the child
    executes the script under a background profiler, streaming results
    back over a socketpair.

    :param script: tuple of (filename, code object, globals dict) to run.
    :param argv: extra argv entries passed to the profiled script.
    :param profiler_factory: callable that builds the profiler instance.
    :param interval: profiling broadcast interval passed to the server.
    :param spawn: function used to start the server's client handler.
    :param signum: signal number used by the background profiler.
    :param pickle_protocol: pickle protocol for the result stream.
    :param mono: passed to make_viewer (presumably monochrome UI — confirm).
    """
    filename, code, globals_ = script
    sys.argv[:] = [filename] + list(argv)
    # Socketpair carries profiling data child -> parent; the pipe carries
    # the child's stderr so the parent can replay it after the UI exits.
    parent_sock, child_sock = socket.socketpair()
    stderr_r_fd, stderr_w_fd = os.pipe()
    pid = os.fork()
    if pid:
        # parent
        os.close(stderr_w_fd)
        viewer, loop = make_viewer(mono)
        # loop.screen._term_output_file = open(os.devnull, 'w')
        title = get_title(filename)
        client = ProfilingClient(viewer, loop.event_loop, parent_sock, title)
        client.start()
        try:
            loop.run()
        except KeyboardInterrupt:
            # Ctrl-C in the viewer: forward the interrupt to the child.
            os.kill(pid, signal.SIGINT)
        except BaseException:
            # unexpected profiler error.
            os.kill(pid, signal.SIGTERM)
            raise
        finally:
            parent_sock.close()
        # get exit code of child.
        w_pid, status = os.waitpid(pid, os.WNOHANG)
        if w_pid == 0:
            # Child still alive after the viewer quit; terminate it.
            os.kill(pid, signal.SIGTERM)
        exit_code = os.WEXITSTATUS(status)
        # print stderr of child.
        with os.fdopen(stderr_r_fd, 'r') as f:
            child_stderr = f.read()
        if child_stderr:
            sys.stdout.flush()
            sys.stderr.write(child_stderr)
        # exit with exit code of child.
        sys.exit(exit_code)
    else:
        # child
        os.close(stderr_r_fd)
        # mute stdin, stdout.
        devnull = os.open(os.devnull, os.O_RDWR)
        for f in [sys.stdin, sys.stdout]:
            os.dup2(devnull, f.fileno())
        # redirect stderr to parent.
        os.dup2(stderr_w_fd, sys.stderr.fileno())
        # Profile from the current frame down into the exec'd code.
        frame = sys._getframe()
        profiler = profiler_factory(base_frame=frame, base_code=code)
        profiler_trigger = BackgroundProfiler(profiler, signum)
        profiler_trigger.prepare()
        server_args = (interval, noop, pickle_protocol)
        server = SelectProfilingServer(None, profiler_trigger, *server_args)
        server.clients.add(child_sock)
        spawn(server.connected, child_sock)
        try:
            exec_(code, globals_)
        finally:
            # Signal EOF to the parent even if the script raised.
            os.close(stderr_w_fd)
            child_sock.shutdown(socket.SHUT_WR)
|
Profile a Python script continuously.
|
def convert_sum(
    params, w_name, scope_name, inputs, layers, weights, names
):
    """
    Convert sum.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting Sum ...')

    def target_layer(x):
        import keras.backend as K
        return K.sum(x)

    # Wrap the backend reduction in a Lambda layer and apply it to the
    # input tensor registered for this node.
    layers[scope_name] = keras.layers.Lambda(target_layer)(layers[inputs[0]])
|
Convert sum.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
def bounded_uniform(cls, lowest, highest, weight_interval=None):
    """
    Initialize with a uniform distribution between two values.

    Without ``weight_interval`` the distribution is just
    ``[(lowest, 1), (highest, 1)]``.  With it, evenly weighted points are
    inserted every ``weight_interval`` between the bounds — useful when
    the weights will be reshaped in complex ways after construction.

    Args:
        lowest (float or int):
        highest (float or int):
        weight_interval (int):

    Returns:
        SoftFloat: A newly constructed instance.
    """
    if weight_interval is None:
        return cls([(lowest, 1), (highest, 1)])
    weights = []
    position = lowest
    while position < highest:
        weights.append((position, 1))
        position += weight_interval
    weights.append((highest, 1))
    return cls(weights)
|
Initialize with a uniform distribution between two values.
If no ``weight_interval`` is passed, this weight distribution
will just consist of ``[(lowest, 1), (highest, 1)]``. If specified,
weights (still with uniform weight distribution) will be added every
``weight_interval``. Use this if you intend to modify the weights
in any complex way after initialization.
Args:
lowest (float or int):
highest (float or int):
weight_interval (int):
Returns:
SoftFloat: A newly constructed instance.
|
def add_auth_to_method(self, path, method_name, auth, api):
    """
    Adds auth settings for this path/method. Auth settings currently consist
    solely of Authorizers, but this method will eventually cover other auth
    settings such as API Key, Resource Policy, etc.

    :param string path: Path name
    :param string method_name: Method name
    :param dict auth: Auth configuration such as Authorizers, ApiKey, ResourcePolicy
        (only Authorizers supported currently)
    :param dict api: Reference to the related Api's properties as defined in the template.
    """
    method_authorizer = auth.get('Authorizer') if auth else auth
    if not method_authorizer:
        return
    api_auth = api.get('Auth')
    if api_auth:
        api_authorizers = api_auth.get('Authorizers')
        default_authorizer = api_auth.get('DefaultAuthorizer')
    else:
        api_authorizers = api_auth
        default_authorizer = api_auth
    self.set_method_authorizer(path, method_name, method_authorizer,
                               api_authorizers, default_authorizer)
|
Adds auth settings for this path/method. Auth settings currently consist solely of Authorizers
but this method will eventually include setting other auth settings such as API Key,
Resource Policy, etc.
:param string path: Path name
:param string method_name: Method name
:param dict auth: Auth configuration such as Authorizers, ApiKey, ResourcePolicy (only Authorizers supported
currently)
:param dict api: Reference to the related Api's properties as defined in the template.
|
def get_providers(self, security_filter,
                  name_filter='%',
                  only_providers_flag='Y',
                  internal_external='I',
                  ordering_authority='',
                  real_provider='N'):
    """
    invokes TouchWorksMagicConstants.ACTION_GET_PROVIDERS action

    :param security_filter: This is the EntryCode of the Security_Code_DE dictionary
        for the providers being sought. A list of valid security codes can be obtained
        from GetDictionary on the Security_Code_DE dictionary.
    :param name_filter: name pattern (default '%' — presumably SQL LIKE
        wildcard matching all names; confirm against TouchWorks docs).
    :param only_providers_flag: 'Y'/'N' flag forwarded as parameter3.
    :param internal_external: forwarded as parameter4.
    :param ordering_authority: forwarded as parameter5.
    :param real_provider: 'Y'/'N' flag forwarded as parameter6.
    :return: JSON response
    """
    # NOTE: the previous docstring referenced
    # ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT, but the action actually
    # invoked is ACTION_GET_PROVIDERS (see below).
    magic = self._magic_json(
        action=TouchWorksMagicConstants.ACTION_GET_PROVIDERS,
        parameter1=security_filter,
        parameter2=name_filter,
        parameter3=only_providers_flag,
        parameter4=internal_external,
        parameter5=ordering_authority,
        parameter6=real_provider)
    response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
    result = self._get_results_or_raise_if_magic_invalid(
        magic,
        response,
        TouchWorksMagicConstants.RESULT_GET_PROVIDERS)
    return result
|
invokes TouchWorksMagicConstants.ACTION_GET_PROVIDERS action
:param security_filter - This is the EntryCode of the Security_Code_DE dictionary
for the providers being sought. A list of valid security codes can be obtained from
GetDictionary on the Security_Code_DE dictionary.
:param name_filter
:param only_providers_flag
:param internal_external
:param ordering_authority
:param real_provider
:return: JSON response
|
def _toplevel(cls):
    """Find the top level of the chain we're in.

    For example, if we have:
    C inheriting from B inheriting from A inheriting from ClosureModel
    C._toplevel() will return A.
    """
    # The top of the chain is the ancestor of ``cls`` that directly
    # subclasses ClosureModel.
    direct_children = set(ClosureModel.__subclasses__())
    ancestors = set(cls._meta.get_parent_list())
    matches = list(direct_children & ancestors)
    return matches[0] if matches else cls
|
Find the top level of the chain we're in.
For example, if we have:
C inheriting from B inheriting from A inheriting from ClosureModel
C._toplevel() will return A.
|
def register(self, renewable, timeout=300):
    """Register a renewable entity for automatic lock renewal.

    :param renewable: A locked entity that needs to be renewed.
    :type renewable: ~azure.servicebus.aio.async_message.Message or
     ~azure.servicebus.aio.async_receive_handler.SessionReceiver
    :param timeout: A time in seconds that the lock should be maintained for.
     Default value is 300 (5 minutes).
    :type timeout: int
    """
    started = renewable_start_time(renewable)
    # Keep a reference to the renewal task so it can be awaited/cancelled later.
    task = asyncio.ensure_future(
        self._auto_lock_renew(renewable, started, timeout), loop=self.loop)
    self._futures.append(task)
|
Register a renewable entity for automatic lock renewal.
:param renewable: A locked entity that needs to be renewed.
:type renewable: ~azure.servicebus.aio.async_message.Message or
~azure.servicebus.aio.async_receive_handler.SessionReceiver
:param timeout: A time in seconds that the lock should be maintained for.
Default value is 300 (5 minutes).
:type timeout: int
|
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
"""Get string representation of dependency specification as provided by PythonDependencyParser."""
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
|
Get string representation of dependency specification as provided by PythonDependencyParser.
|
def Zuo_Stenby(T, Tc, Pc, omega):
    r'''Calculates air-water surface tension using the reference fluids
    methods of [1]_.

    .. math::
        \sigma^{(1)} = 40.520(1-T_r)^{1.287}

        \sigma^{(2)} = 52.095(1-T_r)^{1.21548}

        \sigma_r = \sigma_r^{(1)}+ \frac{\omega - \omega^{(1)}}
        {\omega^{(2)}-\omega^{(1)}} (\sigma_r^{(2)}-\sigma_r^{(1)})

        \sigma = T_c^{1/3}P_c^{2/3}[\exp{(\sigma_r)} -1]

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of fluid [Pa]
    omega : float
        Acentric factor for fluid, [-]

    Returns
    -------
    sigma : float
        Liquid surface tension, N/m

    Notes
    -----
    Presently untested. Have not personally checked the sources.
    I strongly believe it is broken.
    The reference values for methane and n-octane are from the DIPPR database.

    Examples
    --------
    Chlorobenzene

    >>> Zuo_Stenby(293., 633.0, 4530000.0, 0.249)
    0.03345569011871088

    References
    ----------
    .. [1] Zuo, You-Xiang, and Erling H. Stenby. "Corresponding-States and
       Parachor Models for the Calculation of Interfacial Tensions." The
       Canadian Journal of Chemical Engineering 75, no. 6 (December 1, 1997):
       1130-37. doi:10.1002/cjce.5450750617
    '''
    # Reference fluids: methane (1) and n-octane (2); pressures in bar.
    Tc_ref1, Pc_ref1, omega_ref1 = 190.56, 4599000.0/1E5, 0.012
    Tc_ref2, Pc_ref2, omega_ref2 = 568.7, 2490000.0/1E5, 0.4
    Pc = Pc/1E5

    def reduced_st(st, tc, pc):
        # Reduced surface tension of a fluid.
        return log(1 + st/(tc**(1/3.0)*pc**(2/3.0)))

    st1 = 40.520*(1 - T/Tc)**1.287    # Methane correlation
    st2 = 52.095*(1 - T/Tc)**1.21548  # n-octane correlation
    st_r1 = reduced_st(st1, Tc_ref1, Pc_ref1)
    st_r2 = reduced_st(st2, Tc_ref2, Pc_ref2)
    # Interpolate on acentric factor between the two reference fluids.
    sigma_r = st_r1 + (omega - omega_ref1)/(omega_ref2 - omega_ref1)*(st_r2 - st_r1)
    sigma = Tc**(1/3.0)*Pc**(2/3.0)*(exp(sigma_r) - 1)
    return sigma/1000  # mN/m -> N/m
|
r'''Calculates air-water surface tension using the reference fluids
methods of [1]_.
.. math::
\sigma^{(1)} = 40.520(1-T_r)^{1.287}
\sigma^{(2)} = 52.095(1-T_r)^{1.21548}
\sigma_r = \sigma_r^{(1)}+ \frac{\omega - \omega^{(1)}}
{\omega^{(2)}-\omega^{(1)}} (\sigma_r^{(2)}-\sigma_r^{(1)})
\sigma = T_c^{1/3}P_c^{2/3}[\exp{(\sigma_r)} -1]
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of fluid [Pa]
omega : float
Acentric factor for fluid, [-]
Returns
-------
sigma : float
Liquid surface tension, N/m
Notes
-----
Presently untested. Have not personally checked the sources.
I strongly believe it is broken.
The reference values for methane and n-octane are from the DIPPR database.
Examples
--------
Chlorobenzene
>>> Zuo_Stenby(293., 633.0, 4530000.0, 0.249)
0.03345569011871088
References
----------
.. [1] Zuo, You-Xiang, and Erling H. Stenby. "Corresponding-States and
Parachor Models for the Calculation of Interfacial Tensions." The
Canadian Journal of Chemical Engineering 75, no. 6 (December 1, 1997):
1130-37. doi:10.1002/cjce.5450750617
|
def create_tag(self, name):
    """
    .. versionadded:: 0.2.0

    Add a new tag resource to the account.

    :param str name: the name of the new tag
    :rtype: Tag
    :raises DOAPIError: if the API endpoint replies with an error
    """
    payload = {"name": name}
    response = self.request('/v2/tags', method='POST', data=payload)
    return self._tag(response["tag"])
|
.. versionadded:: 0.2.0
Add a new tag resource to the account
:param str name: the name of the new tag
:rtype: Tag
:raises DOAPIError: if the API endpoint replies with an error
|
def find_weights(self, scorer, test_size=0.2, method='SLSQP'):
    """Finds optimal weights for weighted average of models.

    Parameters
    ----------
    scorer : function
        Scikit-learn like metric.
    test_size : float, default 0.2
    method : str
        Type of solver. Should be one of: 'Nelder-Mead', 'Powell', 'CG',
        'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP',
        'dogleg', 'trust-ncg'.

    Returns
    -------
    list
    """
    optimizer = Optimizer(self.models, test_size=test_size, scorer=scorer)
    return optimizer.minimize(method)
|
Finds optimal weights for weighted average of models.
Parameters
----------
scorer : function
Scikit-learn like metric.
test_size : float, default 0.2
method : str
Type of solver. Should be one of:
- 'Nelder-Mead'
- 'Powell'
- 'CG'
- 'BFGS'
- 'Newton-CG'
- 'L-BFGS-B'
- 'TNC'
- 'COBYLA'
- 'SLSQP'
- 'dogleg'
- 'trust-ncg'
Returns
-------
list
|
def _modelmat(self, X, term=-1):
    """
    Builds a model matrix, B, out of the spline basis for each feature:
    B = [B_0, B_1, ..., B_p]

    Parameters
    ---------
    X : array-like of shape (n_samples, m_features)
        containing the input dataset
    term : int, optional
        term index for which to compute the model matrix;
        if -1, will create the model matrix for all features

    Returns
    -------
    modelmat : sparse matrix of len n_samples
        containing model matrix of the spline basis for selected features
    """
    # Validate/coerce the input against the fitted statistics first.
    validated = check_X(X, n_feats=self.statistics_['m_features'],
                        edge_knots=self.edge_knots_, dtypes=self.dtype,
                        features=self.feature, verbose=self.verbose)
    return self.terms.build_columns(validated, term=term)
|
Builds a model matrix, B, out of the spline basis for each feature
B = [B_0, B_1, ..., B_p]
Parameters
---------
X : array-like of shape (n_samples, m_features)
containing the input dataset
term : int, optional
term index for which to compute the model matrix
if -1, will create the model matrix for all features
Returns
-------
modelmat : sparse matrix of len n_samples
containing model matrix of the spline basis for selected features
|
def upload_file_content(self, file_id, etag=None, source=None, content=None):
    '''Upload a file content. The file entity must already exist.

    If an ETag is provided the file stored on the server is verified
    against it. If it does not match, StorageException is raised.
    This means the client needs to update its knowledge of the resource
    before attempting to update again. This can be used for optimistic
    concurrency control.

    Args:
        file_id (str): The UUID of the file whose content is written.
        etag (str): The etag to match the contents against.
        source (str): The path of the local file whose content to be uploaded.
        content (str): A string of the content to be uploaded.

    Note:
        ETags should be enclosed in double quotes::

            my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"'

    Returns:
        The ETag of the file upload::

            '"71e1ed9ee52e565a56aec66bc648a32c"'

    Raises:
        IOError: The source cannot be opened.
        StorageArgumentException: Invalid arguments
        StorageForbiddenException: Server response code 403
        StorageNotFoundException: Server response code 404
        StorageException: other 400-600 error codes
    '''
    if not is_valid_uuid(file_id):
        raise StorageArgumentException(
            'Invalid UUID for file_id: {0}'.format(file_id))
    if not (source or content) or (source and content):
        raise StorageArgumentException('Either one of source file or content '
                                       'has to be provided.')
    # The original leaked the file handle opened for ``source``; close it
    # deterministically once the request completes (success or failure).
    body = content or open(source, 'rb')
    try:
        resp = self._authenticated_request \
            .to_endpoint('file/{}/content/upload/'.format(file_id)) \
            .with_body(body) \
            .with_headers({'If-Match': etag} if etag else {}) \
            .post()
    finally:
        if body is not content:
            body.close()
    if 'ETag' not in resp.headers:
        raise StorageException('No ETag received from the service after the upload')
    return resp.headers['ETag']
|
Upload a file content. The file entity must already exist.
If an ETag is provided the file stored on the server is verified
against it. If it does not match, StorageException is raised.
This means the client needs to update its knowledge of the resource
before attempting to update again. This can be used for optimistic
concurrency control.
Args:
file_id (str): The UUID of the file whose content is written.
etag (str): The etag to match the contents against.
source (str): The path of the local file whose content to be uploaded.
content (str): A string of the content to be uploaded.
Note:
ETags should be enclosed in double quotes::
my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"'
Returns:
The ETag of the file upload::
'"71e1ed9ee52e565a56aec66bc648a32c"'
Raises:
IOError: The source cannot be opened.
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']:
    """Get another leaf node with name `val` if possible."""
    # Sentinel-based lookup so that falsy stored nodes are still returned.
    missing = object()
    node = self.paths.get(val, missing)
    if node is not missing:
        return node
    # Fall back to the parametrized child, if this node has one.
    if self.param:
        return self.param
    raise IndexError(_("Value {} is missing from api").format(val))
|
Get another leaf node with name `val` if possible
|
def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
    """
    Check the gradient of the model by comparing to a numerical
    estimate. If the verbose flag is passed, individual
    components are tested (and printed)

    :param target_param: parameter(s) to restrict the check to; None checks all.
    :param verbose: If True, print a "full" checking of each parameter
    :type verbose: bool
    :param step: The size of the step around which to linearise the objective
    :type step: float (default 1e-6)
    :param tolerance: the tolerance allowed (see note)
    :type tolerance: float (default 1e-3)
    :param df_tolerance: threshold below which numerical gradients are
        flagged as numerically unstable (verbose mode only).
    :return: bool — True when the analytical gradient matches the numerical one.

    Note:-
       The gradient is considered correct if the ratio of the analytical
       and numerical gradients is within <tolerance> of unity.

       The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.
       If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually
       not accurate enough for the tests (shown with blue).
    """
    if not self._model_initialized_:
        import warnings
        warnings.warn("This model has not been initialized, try model.inititialize_model()", RuntimeWarning)
        return False

    # Work on a copy of the flattened, transformed parameter vector.
    x = self.optimizer_array.copy()

    if not verbose:
        # make sure only to test the selected parameters
        if target_param is None:
            transformed_index = np.arange(len(x))
        else:
            transformed_index = self._raveled_index_for_transformed(target_param)

            if transformed_index.size == 0:
                print("No free parameters to check")
                return True

        # just check the global ratio: perturb all selected parameters at
        # once with random signs (fixed sign for exactly 2 parameters).
        dx = np.zeros(x.shape)
        dx[transformed_index] = step * (np.sign(np.random.uniform(-1, 1, transformed_index.size)) if transformed_index.size != 2 else 1.)

        # evaluate the objective on both sides of x (central difference)
        f1 = self._objective(x + dx)
        f2 = self._objective(x - dx)
        gradient = self._grads(x)

        dx = dx[transformed_index]
        gradient = gradient[transformed_index]

        # Directional derivative: (f1-f2) / (2*dx.g) should be ~1 when the
        # analytic gradient is correct; guard the division against zero.
        denominator = (2 * np.dot(dx, gradient))
        global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
        global_diff = np.abs(f1 - f2) < tolerance and np.allclose(gradient, 0, atol=tolerance)
        # NOTE(review): `is np.nan` is an identity test and will not catch a
        # computed NaN in general — confirm intent.
        if global_ratio is np.nan:  # pragma: no cover
            global_ratio = 0
        return np.abs(1. - global_ratio) < tolerance or global_diff
    else:
        # check the gradient of each parameter individually, and do some pretty printing
        try:
            names = self.parameter_names_flat()
        except NotImplementedError:
            names = ['Variable %i' % i for i in range(len(x))]
        # Prepare for pretty-printing: column widths sized to names/headers.
        header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical', 'dF_ratio']
        max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])
        float_len = 10
        cols = [max_names]
        cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
        cols = np.array(cols) + 5
        header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
        header_string = list(map(lambda x: '|'.join(x), [header_string]))
        separator = '-' * len(header_string[0])
        print('\n'.join([header_string[0], separator]))
        if target_param is None:
            target_param = self
        transformed_index = self._raveled_index_for_transformed(target_param)

        if transformed_index.size == 0:
            print("No free parameters to check")
            return True

        gradient = self._grads(x).copy()
        # NOTE(review): the result of this np.where is discarded — it has no
        # effect as written; confirm whether an assignment was intended.
        np.where(gradient == 0, 1e-312, gradient)
        ret = True
        for xind in zip(transformed_index):
            # Central difference for this single parameter.
            xx = x.copy()
            xx[xind] += step
            f1 = float(self._objective(xx))
            xx[xind] -= 2.*step
            f2 = float(self._objective(xx))
            # Avoid divide by zero: if both values are within +-1e-15 they
            # are essentially the same and the ratio is meaningless.
            if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
                df_ratio = np.abs((f1 - f2) / min(f1, f2))
            else:  # pragma: no cover
                df_ratio = 1.0
            df_unstable = df_ratio < df_tolerance
            numerical_gradient = (f1 - f2) / (2. * step)
            if np.all(gradient[xind] == 0):  # pragma: no cover
                ratio = (f1 - f2) == gradient[xind]
            else:
                ratio = (f1 - f2) / (2. * step * gradient[xind])
            difference = np.abs(numerical_gradient - gradient[xind])

            # Color the row: green = pass, red = fail, blue = numerically
            # unstable (ANSI escape codes).
            if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
                formatted_name = "\033[92m {0} \033[0m".format(names[xind])
                ret &= True
            else:  # pragma: no cover
                formatted_name = "\033[91m {0} \033[0m".format(names[xind])
                ret &= False
            if df_unstable:  # pragma: no cover
                formatted_name = "\033[94m {0} \033[0m".format(names[xind])

            r = '%.6f' % float(ratio)
            d = '%.6f' % float(difference)
            g = '%.6f' % gradient[xind]
            ng = '%.6f' % float(numerical_gradient)
            df = '%1.e' % float(df_ratio)
            grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
            print(grad_string)
        # Restore the untouched parameter vector before returning.
        self.optimizer_array = x
        return ret
|
Check the gradient of the model by comparing to a numerical
estimate. If the verbose flag is passed, individual
components are tested (and printed)
:param verbose: If True, print a "full" checking of each parameter
:type verbose: bool
:param step: The size of the step around which to linearise the objective
:type step: float (default 1e-6)
:param tolerance: the tolerance allowed (see note)
:type tolerance: float (default 1e-3)
Note:-
The gradient is considered correct if the ratio of the analytical
and numerical gradients is within <tolerance> of unity.
The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.
If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually
not accurate enough for the tests (shown with blue).
|
def updateResults(self, newResults, **kwArgs):
    """
    Merge result values into this request's results dictionary,
    excluding the 'response' and 'logEntries' values.

    Behaviour is controlled by the optional ``reset`` keyword:
        0 - Not a reset (the default): copy every key from newResults
            into the stored results except 'response' and 'logEntries'.
        1 - Reset failure related items (overallRC, rc, rs, errno,
            strError); responses and log entries are kept.
        2 - Reset everything mode 1 does, plus 'logEntries' and
            'response'.

    Input:
       Dictionary containing the results to be updated, or an empty
       dictionary if the reset keyword was specified.

    Output:
       Request handle is updated with the results.
    """
    reset = kwArgs.get('reset', 0)

    if reset == 0:
        # Plain merge: copy everything except the response/log data.
        skipped = ('response', 'logEntries')
        for key, value in newResults.items():
            if key not in skipped:
                self.results[key] = value
        return

    if reset in (1, 2):
        # Clear all failure-related indicators.
        self.results.update(
            overallRC=0,
            rc=0,
            rs=0,
            errno=0,
            strError='')
    if reset == 2:
        # Also wipe any accumulated responses and log entries.
        self.results.update(logEntries='', response='')
    return
|
Update the results related to this request excluding the 'response'
and 'logEntries' values.
We specifically update (if present):
overallRC, rc, rs, errno.
Input:
Dictionary containing the results to be updated, or an empty
dictionary if the reset keyword was specified.
Reset keyword:
0 - Not a reset. This is the default if the reset keyword was not
specified.
1 - Reset failure related items in the result dictionary.
This excludes responses and log entries.
2 - Reset all result items in the result dictionary.
Output:
Request handle is updated with the results.
|
def metadata(self):
    """
    Lazily load and return the metadata info for this prefix.

    The metadata file is read at most once; later calls return the
    cached dictionary. A missing or unreadable file yields an empty
    dict.

    Returns:
        dict: metadata info
    """
    if self._metadata is not None:
        return self._metadata
    try:
        with open(self.paths.metadata()) as metadata_fd:
            self._metadata = json.load(metadata_fd)
    except IOError:
        # No metadata file (yet) -- treat as empty metadata.
        self._metadata = {}
    return self._metadata
|
Retrieve the metadata info for this prefix
Returns:
dict: metadata info
|
def _assemble_with_columns(self, sql_str, columns, *args, **kwargs):
    """
    Format a select statement with specific columns

    :sql_str: An SQL string template whose {0} placeholder receives the
        column list; remaining placeholders receive *args as literals
    :columns: The columns to be selected and put into {0}; dotted names
        (e.g. table_alias.column) are quoted part by part
    :*args: Arguments to use as query parameters.
    :returns: Psycopg2 compiled query
    """
    def quote(column):
        # Dotted identifiers must be quoted piecewise, otherwise the
        # dot would end up inside a single quoted identifier.
        if '.' in column:
            pieces = column.split('.')
            return sql.SQL('.').join(sql.Identifier(piece) for piece in pieces)
        return sql.Identifier(column)

    column_list = sql.SQL(', ').join(quote(column) for column in columns)
    literals = [sql.Literal(value) for value in args]
    return sql.SQL(sql_str).format(column_list, *literals)
|
Format a select statement with specific columns
:sql_str: An SQL string template
:columns: The columns to be selected and put into {0}
:*args: Arguments to use as query parameters.
:returns: Psycopg2 compiled query
|
def daemon_start(main, pidfile, daemon=True, workspace=None):
    """Start application in background mode if required and available. If not then in front mode.

    :param main: zero-argument callable that runs the application.
    :param pidfile: path of the pid file, used to detect an already
        running instance; may be falsy to skip pid-file handling.
    :param daemon: if True, try to daemonize (only done on POSIX).
    :param workspace: directory to chdir into before starting; defaults
        to the current working directory.
    """
    logger.debug("start daemon application pidfile={pidfile} daemon={daemon} workspace={workspace}.".format(pidfile=pidfile, daemon=daemon, workspace=workspace))
    # NOTE(review): the pid is read before any daemonization; if
    # make_basic_daemon() forks, the process pid logged below may no
    # longer be the real one -- confirm intended.
    new_pid = os.getpid()
    workspace = workspace or os.getcwd()
    os.chdir(workspace)
    daemon_flag = False
    if pidfile and daemon:
        old_pid = load_pid(pidfile)
        if old_pid:
            logger.debug("pidfile {pidfile} already exists, pid={pid}.".format(pidfile=pidfile, pid=old_pid))
        # if old service is running, just exit.
        if old_pid and is_running(old_pid):
            error_message = "Service is running in process: {pid}.".format(pid=old_pid)
            logger.error(error_message)
            six.print_(error_message, file=os.sys.stderr)
            os.sys.exit(95)
        # clean old pid file.
        clean_pid_file(pidfile)
    # start as background mode if required and available.
    if daemon and os.name == "posix":
        make_basic_daemon()
        daemon_flag = True
    if daemon_flag:
        logger.info("Start application in DAEMON mode, pidfile={pidfile} pid={pid}".format(pidfile=pidfile, pid=new_pid))
    else:
        logger.info("Start application in FRONT mode, pid={pid}.".format(pid=new_pid))
    # Record our pid and make sure the pid file is removed on exit.
    write_pidfile(pidfile)
    atexit.register(clean_pid_file, pidfile)
    main()
    return
|
Start application in background mode if required and available. If not then in front mode.
|
def create_entity2user(enti_uid, user_id):
    '''
    Create (or bump) the entity-to-user record in the database.

    If a record for (enti_uid, user_id) already exists its counter is
    increased via MEntity2User.count_increate; otherwise a fresh record
    with count 1 is inserted.
    '''
    existing = TabEntity2User.select().where(
        (TabEntity2User.entity_id == enti_uid) & (TabEntity2User.user_id == user_id)
    )
    if existing.count() == 0:
        # First association between this entity and this user.
        TabEntity2User.create(
            uid=tools.get_uuid(),
            entity_id=enti_uid,
            user_id=user_id,
            count=1,
            timestamp=time.time()
        )
    else:
        hit = existing.get()
        MEntity2User.count_increate(hit.uid, hit.count)
|
create entity2user record in the database.
|
def _yield_exercises(self):
"""A helper function to reduce the number of nested loops.
Yields
-------
(dynamic_ex) or (static_ex)
Yields the exercises in the program.
"""
for day in self.days:
for dynamic_ex in day.dynamic_exercises:
yield dynamic_ex
for static_ex in day.static_exercises:
yield static_ex
|
A helper function to reduce the number of nested loops.
Yields
-------
(dynamic_ex) or (static_ex)
Yields the exercises in the program.
|
def link_with_parents(self, parent, c_selectors, c_rules):
    """
    Link with a parent for the current child rule.
    If parents found, returns a list of parent rules to the child

    :param parent: the parent selector being extended.
    :param c_selectors: comma-separated selectors of the child rule.
    :param c_rules: rules of the child rule (list of rule records).
    """
    parent_found = None
    # NOTE(review): self.parts is mutated below (del / setdefault) while
    # being iterated here; this relies on items() snapshot semantics
    # (Python 2) -- on Python 3 dict views this would raise
    # RuntimeError. Confirm against the project's supported versions.
    for p_selectors, p_rules in self.parts.items():
        _p_selectors, _, _ = p_selectors.partition(' extends ')
        _p_selectors = _p_selectors.split(',')

        new_selectors = set()
        found = False

        # Finds all the parent selectors and parent selectors with another
        # bind selectors behind. For example, if `.specialClass extends
        # .baseClass`,
        # and there is a `.baseClass` selector, the extension should create
        # `.specialClass` for that rule, but if there's also a `.baseClass
        # a`
        # it also should create `.specialClass a`
        for p_selector in _p_selectors:
            if parent in p_selector:
                # get the new child selector to add (same as the parent
                # selector but with the child name)
                # since selectors can be together, separated with # or .
                # (i.e. something.parent) check that too:
                for c_selector in c_selectors.split(','):
                    # Get whatever is different between the two selectors:
                    _c_selector, _parent = c_selector, parent
                    # Strip the longest common prefix and suffix so only
                    # the differing middle parts are substituted.
                    lcp = self.longest_common_prefix(_c_selector, _parent)
                    if lcp:
                        _c_selector = _c_selector[lcp:]
                        _parent = _parent[lcp:]
                    lcs = self.longest_common_suffix(_c_selector, _parent)
                    if lcs:
                        _c_selector = _c_selector[:-lcs]
                        _parent = _parent[:-lcs]
                    if _c_selector and _parent:
                        # Get the new selectors:
                        prev_symbol = '(?<![#.:])' if _parent[0] in ('#', '.', ':') else r'(?<![-\w#.:])'
                        post_symbol = r'(?![-\w])'
                        new_parent = re.sub(prev_symbol + _parent + post_symbol, _c_selector, p_selector)
                        if p_selector != new_parent:
                            new_selectors.add(new_parent)
                            found = True

        if found:
            # add parent:
            parent_found = parent_found or []
            parent_found.extend(p_rules)

        if new_selectors:
            new_selectors = self.normalize_selectors(p_selectors, new_selectors)
            # rename node:
            if new_selectors != p_selectors:
                del self.parts[p_selectors]
                self.parts.setdefault(new_selectors, [])
                self.parts[new_selectors].extend(p_rules)

            deps = set()
            # save child dependencies:
            for c_rule in c_rules or []:
                c_rule[SELECTORS] = c_selectors  # re-set the SELECTORS for the rules
                deps.add(c_rule[POSITION])

            for p_rule in p_rules:
                p_rule[SELECTORS] = new_selectors  # re-set the SELECTORS for the rules
                p_rule[DEPS].update(deps)  # position is the "index" of the object

    return parent_found
|
Link with a parent for the current child rule.
If parents found, returns a list of parent rules to the child
|
def match_member_id(self, member_conf, current_member_confs):
    """
    Find the '_id' of a current member matching member_conf by host.

    Returns the '_id' of the first element of current_member_confs
    whose 'host' is the same address as member_conf['host'] (per
    is_same_address), or None when there is no match or no current
    members at all.
    """
    if current_member_confs is None:
        return None
    matching_ids = (conf['_id'] for conf in current_member_confs
                    if is_same_address(member_conf['host'], conf['host']))
    return next(matching_ids, None)
|
Attempts to find an id for member_conf such that among the current
member confs there exists a matching element.
Returns the id of an element of current confs
WHERE member_conf.host and element.host are EQUAL or map to same host
|
def _load_tsv_variables(layout, suffix, dataset=None, columns=None,
                        prepend_type=False, scope='all', **selectors):
    ''' Reads variables from scans.tsv, sessions.tsv, and participants.tsv.

    Args:
        layout (BIDSLayout): The BIDSLayout to use.
        suffix (str): The suffix of file to read from. Must be one of 'scans',
            'sessions', or 'participants'.
        dataset (NodeIndex): A BIDS NodeIndex container. If None, a new one is
            initialized.
        columns (list): Optional list of names specifying which columns in the
            files to return. If None, all columns are returned.
        prepend_type (bool): If True, variable names are prepended with the
            type name (e.g., 'age' becomes 'participants.age').
        scope (str, list): The scope of the space to search for variables. See
            docstring for BIDSLayout for details and valid predefined values.
        selectors (dict): Optional keyword arguments passed onto the
            BIDSLayout instance's get() method; can be used to constrain
            which data are loaded.

    Returns: A NodeIndex instance.
    '''
    # Sanitize the selectors: only keep entities at current level or above
    remap = {'scans': 'run', 'sessions': 'session', 'participants': 'subject'}
    level = remap[suffix]
    valid_entities = BASE_ENTITIES[:BASE_ENTITIES.index(level)]
    layout_kwargs = {k: v for k, v in selectors.items() if k in valid_entities}

    if dataset is None:
        dataset = NodeIndex()

    files = layout.get(extensions='.tsv', return_type='file', suffix=suffix,
                       scope=scope, **layout_kwargs)

    for f in files:
        # Resolve the path to its BIDSFile record for entity access.
        f = layout.files[f]
        _data = pd.read_csv(f.path, sep='\t')

        # Entities can be defined either within the first column of the .tsv
        # file (for entities that vary by row), or from the full file path
        # (for entities constant over all rows in the file). We extract both
        # and store them in the main DataFrame alongside other variables (as
        # they'll be extracted when the BIDSVariable is initialized anyway).
        for ent_name, ent_val in f.entities.items():
            if ent_name in ALL_ENTITIES:
                _data[ent_name] = ent_val

        # Handling is a bit more convoluted for scans.tsv, because the first
        # column contains the run filename, which we also need to parse.
        if suffix == 'scans':
            # Suffix is guaranteed to be present in each filename, so drop the
            # constant column with value 'scans' to make way for it and prevent
            # two 'suffix' columns.
            _data.drop(columns='suffix', inplace=True)

            image = _data['filename']
            _data = _data.drop('filename', axis=1)
            dn = f.dirname
            paths = [join(dn, p) for p in image.values]
            ent_recs = [layout.files[p].entities for p in paths
                        if p in layout.files]
            ent_cols = pd.DataFrame.from_records(ent_recs)
            _data = pd.concat([_data, ent_cols], axis=1, sort=True)
            # It's possible to end up with duplicate entity columns this way
            _data = _data.T.drop_duplicates().T

        # The BIDS spec requires ID columns to be named 'session_id', 'run_id',
        # etc., and IDs begin with entity prefixes (e.g., 'sub-01'). To ensure
        # consistent internal handling, we strip these suffixes and prefixes.
        elif suffix == 'sessions':
            _data = _data.rename(columns={'session_id': 'session'})
            _data['session'] = _data['session'].str.replace('ses-', '')

        elif suffix == 'participants':
            _data = _data.rename(columns={'participant_id': 'subject'})
            _data['subject'] = _data['subject'].str.replace('sub-', '')

        def make_patt(x, regex_search=False):
            # Turn a selector value into a regex pattern string.
            patt = '%s' % x
            if isinstance(x, (int, float)):
                # allow for leading zeros if a number was specified
                # regardless of regex_search
                patt = '0*' + patt
            if not regex_search:
                patt = '^%s$' % patt
            return patt

        # Filter rows on all selectors
        comm_cols = list(set(_data.columns) & set(selectors.keys()))
        for col in comm_cols:
            ent_patts = [make_patt(x, regex_search=layout.regex_search)
                         for x in listify(selectors.get(col))]
            patt = '|'.join(ent_patts)
            _data = _data[_data[col].str.contains(patt)]

        # Each suffix attaches its variables at a fixed hierarchy level.
        level = {'scans': 'session', 'sessions': 'subject',
                 'participants': 'dataset'}[suffix]
        node = dataset.get_or_create_node(level, f.entities)

        ent_cols = list(set(ALL_ENTITIES) & set(_data.columns))
        amp_cols = list(set(_data.columns) - set(ent_cols))

        if columns is not None:
            amp_cols = list(set(amp_cols) & set(columns))

        for col_name in amp_cols:
            # Rename columns: values must be in 'amplitude'
            df = _data.loc[:, [col_name] + ent_cols]
            df.columns = ['amplitude'] + ent_cols
            if prepend_type:
                col_name = '%s.%s' % (suffix, col_name)
            node.add_variable(SimpleVariable(name=col_name, data=df, source=suffix))

    return dataset
|
Reads variables from scans.tsv, sessions.tsv, and participants.tsv.
Args:
layout (BIDSLayout): The BIDSLayout to use.
suffix (str): The suffix of file to read from. Must be one of 'scans',
'sessions', or 'participants'.
dataset (NodeIndex): A BIDS NodeIndex container. If None, a new one is
initialized.
columns (list): Optional list of names specifying which columns in the
files to return. If None, all columns are returned.
prepend_type (bool): If True, variable names are prepended with the
type name (e.g., 'age' becomes 'participants.age').
scope (str, list): The scope of the space to search for variables. See
docstring for BIDSLayout for details and valid predefined values.
selectors (dict): Optional keyword arguments passed onto the
BIDSLayout instance's get() method; can be used to constrain
which data are loaded.
Returns: A NodeIndex instance.
|
def get_field(name, data, default="object", document_object_field=None, is_document=False):
    """
    Return a valid Field by given data

    :param name: the field name.
    :param data: an AbstractField instance (returned unchanged) or a
        mapping whose 'type' entry selects the concrete field class.
    :param default: type assumed when the mapping has no 'type' key.
    :param document_object_field: optional class to instantiate for
        document-like fields instead of DocumentObjectField.
    :param is_document: force treating the data as a document field.
    :raises RuntimeError: if the resolved type is not recognised.
    """
    if isinstance(data, AbstractField):
        return data
    data = keys_to_string(data)
    _type = data.get('type', default)
    if _type == "string":
        return StringField(name=name, **data)
    elif _type == "binary":
        return BinaryField(name=name, **data)
    elif _type == "boolean":
        return BooleanField(name=name, **data)
    elif _type == "byte":
        return ByteField(name=name, **data)
    elif _type == "short":
        return ShortField(name=name, **data)
    elif _type == "integer":
        return IntegerField(name=name, **data)
    elif _type == "long":
        return LongField(name=name, **data)
    elif _type == "float":
        return FloatField(name=name, **data)
    elif _type == "double":
        return DoubleField(name=name, **data)
    elif _type == "ip":
        return IpField(name=name, **data)
    elif _type == "date":
        return DateField(name=name, **data)
    elif _type == "multi_field":
        return MultiField(name=name, **data)
    elif _type == "geo_point":
        return GeoPointField(name=name, **data)
    elif _type == "attachment":
        return AttachmentField(name=name, **data)
    elif is_document or _type == "document":
        if document_object_field:
            return document_object_field(name=name, **data)
        else:
            # Drop a possible 'name' key so it cannot clash with the
            # explicit name= keyword below.
            data.pop("name",None)
            return DocumentObjectField(name=name, **data)
    elif _type == "object":
        if '_timestamp' in data or "_all" in data:
            # NOTE(review): unlike the document branch above, 'name' is
            # not popped from data here, so a 'name' key would collide
            # with the explicit name= keyword -- confirm whether it
            # should be popped on this path too.
            if document_object_field:
                return document_object_field(name=name, **data)
            else:
                return DocumentObjectField(name=name, **data)

        return ObjectField(name=name, **data)
    elif _type == "nested":
        return NestedObject(name=name, **data)
    raise RuntimeError("Invalid type: %s" % _type)
|
Return a valid Field by given data
|
def execute(self, task):
    """
    Given a task instance, this runs it.

    This includes handling retries & re-raising exceptions.

    Ex::

        task = Task(async=False, retries=5)
        task.to_call(add, 101, 35)
        finished_task = gator.execute(task)

    :param task: The task instance to process
    :type task: Task

    :returns: The completed ``Task`` instance's result

    :raises Exception: re-raises whatever ``task.run()`` raised once the
        retry budget is exhausted.
    """
    try:
        return task.run()
    except Exception:
        if task.retries <= 0:
            # No retries left: propagate the original exception.
            raise

        task.retries -= 1
        task.to_retrying()

        # ``async`` became a reserved keyword in Python 3.7, so the
        # attribute must be read with getattr() rather than the former
        # ``task.async`` attribute access (a SyntaxError on 3.7+).
        if getattr(task, 'async'):
            # Place it back on the queue for a later worker.
            data = task.serialize()
            task.task_id = self.backend.push(
                self.queue_name,
                task.task_id,
                data
            )
        else:
            # Synchronous task: retry immediately (recursively).
            return self.execute(task)
|
Given a task instance, this runs it.
This includes handling retries & re-raising exceptions.
Ex::
task = Task(async=False, retries=5)
task.to_call(add, 101, 35)
finished_task = gator.execute(task)
:param task_id: The identifier of the task to process
:type task_id: string
:returns: The completed ``Task`` instance
|
def auth_required(self):
    """
    Return the (auth, node) pair governing this node.

    If any ancestor required an authentication, this node needs it too,
    so when this node has no ``_auth`` of its own the lookup is
    delegated to the parent via ``__parent__``.
    """
    if not self._auth:
        # Nothing configured here -- ask the parent node.
        return self.__parent__.auth_required()
    return self._auth, self
|
If any ancestor required an authentication, this node needs it too.
|
def DeleteInstance(r, instance, dry_run=False):
    """
    Deletes an instance.

    @type instance: str
    @param instance: the instance to delete
    @type dry_run: bool
    @param dry_run: whether to perform a dry run

    @rtype: int
    @return: job id
    """
    endpoint = "/2/instances/%s" % instance
    return r.request("delete", endpoint, query={"dry-run": dry_run})
|
Deletes an instance.
@type instance: str
@param instance: the instance to delete
@rtype: int
@return: job id
|
def check_email_syntax(self, mail):
    """Check email syntax. The relevant RFCs:

    - How to check names (memo):
      http://tools.ietf.org/html/rfc3696
    - Email address syntax
      http://tools.ietf.org/html/rfc2822
    - SMTP protocol
      http://tools.ietf.org/html/rfc5321#section-4.1.3
    - IPv6
      http://tools.ietf.org/html/rfc4291#section-2.2
    - Host syntax
      http://tools.ietf.org/html/rfc1123#section-2

    On the first violation found, a localized error is recorded via
    self.set_result(..., valid=False, overwrite=False) and checking
    stops; a syntactically valid address records nothing.
    """
    # length checks

    # restrict email length to 256 characters
    # http://www.rfc-editor.org/errata_search.php?eid=1003
    if len(mail) > 256:
        self.set_result(_("Mail address `%(addr)s' too long. Allowed 256 chars, was %(length)d chars.") % \
                        {"addr": mail, "length": len(mail)}, valid=False, overwrite=False)
        return
    if "@" not in mail:
        self.set_result(_("Missing `@' in mail address `%(addr)s'.") % \
                        {"addr": mail}, valid=False, overwrite=False)
        return
    # note: be sure to use rsplit since "@" can occur in local part
    local, domain = mail.rsplit("@", 1)
    if not local:
        self.set_result(_("Missing local part of mail address `%(addr)s'.") % \
                        {"addr": mail}, valid=False, overwrite=False)
        return
    if not domain:
        self.set_result(_("Missing domain part of mail address `%(addr)s'.") % \
                        {"addr": mail}, valid=False, overwrite=False)
        return
    if len(local) > 64:
        self.set_result(_("Local part of mail address `%(addr)s' too long. Allowed 64 chars, was %(length)d chars.") % \
                        {"addr": mail, "length": len(local)}, valid=False, overwrite=False)
        return
    if len(domain) > 255:
        # BUG FIX: the reported length previously used len(local); the
        # message describes the domain part, so report len(domain).
        self.set_result(_("Domain part of mail address `%(addr)s' too long. Allowed 255 chars, was %(length)d chars.") % \
                        {"addr": mail, "length": len(domain)}, valid=False, overwrite=False)
        return
    # local part syntax check
    # Rules taken from http://tools.ietf.org/html/rfc3696#section-3
    if is_quoted(local):
        if is_missing_quote(local):
            self.set_result(_("Unquoted double quote or backslash in mail address `%(addr)s'.") % \
                            {"addr": mail}, valid=False, overwrite=False)
            return
    else:
        if local.startswith(u"."):
            self.set_result(_("Local part of mail address `%(addr)s' may not start with a dot.") % \
                            {"addr": mail}, valid=False, overwrite=False)
            return
        if local.endswith(u"."):
            self.set_result(_("Local part of mail address `%(addr)s' may not end with a dot.") % \
                            {"addr": mail}, valid=False, overwrite=False)
            return
        if u".." in local:
            self.set_result(_("Local part of mail address `%(addr)s' may not contain two dots.") % \
                            {"addr": mail}, valid=False, overwrite=False)
            return
        # Special characters are only allowed when backslash-escaped.
        for char in u'@ \\",[]':
            if char in local.replace(u"\\%s" % char, u""):
                self.set_result(_("Local part of mail address `%(addr)s' contains unquoted character `%(char)s.") % \
                                {"addr": mail, "char": char}, valid=False, overwrite=False)
                return
    # domain part syntax check
    if is_literal(domain):
        # it's an IP address
        ip = domain[1:-1]
        if ip.startswith(u"IPv6:"):
            ip = ip[5:]
        if not iputil.is_valid_ip(ip):
            self.set_result(_("Domain part of mail address `%(addr)s' has invalid IP.") % \
                            {"addr": mail}, valid=False, overwrite=False)
            return
    else:
        # it's a domain name
        if not urlutil.is_safe_domain(domain):
            self.set_result(_("Invalid domain part of mail address `%(addr)s'.") % \
                            {"addr": mail}, valid=False, overwrite=False)
            return
        if domain.endswith(".") or domain.split(".")[-1].isdigit():
            self.set_result(_("Invalid top level domain part of mail address `%(addr)s'.") % \
                            {"addr": mail}, valid=False, overwrite=False)
            return
|
Check email syntax. The relevant RFCs:
- How to check names (memo):
http://tools.ietf.org/html/rfc3696
- Email address syntax
http://tools.ietf.org/html/rfc2822
- SMTP protocol
http://tools.ietf.org/html/rfc5321#section-4.1.3
- IPv6
http://tools.ietf.org/html/rfc4291#section-2.2
- Host syntax
http://tools.ietf.org/html/rfc1123#section-2
|
def insort_right(a, x, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the right of the rightmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    hi = len(a) if hi is None else hi
    # Binary search for the leftmost position where a[pos] > x.
    while lo < hi:
        middle = (lo + hi) // 2
        if x < a[middle]:
            hi = middle
        else:
            lo = middle + 1
    a.insert(lo, x)
|
Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
|
def forms(self):
    """ Form values parsed from an `url-encoded` or `multipart/form-data`
        encoded POST or PUT request body. The result is returned as a
        :class:`FormsDict`. All keys and values are strings. File uploads
        are stored separately in :attr:`files`. """
    form_fields = FormsDict()
    for key, value in self.POST.iterallitems():
        # Anything carrying a ``filename`` attribute is a file upload
        # and belongs in :attr:`files`, not here.
        if hasattr(value, 'filename'):
            continue
        form_fields[key] = value
    return form_fields
|
Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`.
|
def axis_bounds(self) -> Dict[str, Tuple[float, float]]:
    """ The (minimum, maximum) bounds for each axis; the B and C axes
    are excluded. The maximum is the home position plus half a unit. """
    bounds = {}
    for axis, home_position in _HOME_POSITION.items():
        if axis in 'BC':
            continue
        bounds[axis] = (0, home_position + 0.5)
    return bounds
|
The (minimum, maximum) bounds for each axis.
|
def fmt_duration(secs):
    """Format a duration in seconds as a short human-readable string
    with normalized single-space separators."""
    human = fmt.human_duration(secs, 0, precision=2, short=True)
    # Collapse any run of whitespace down to single spaces.
    return ' '.join(human.strip().split())
|
Format a duration in seconds.
|
def login_required(request):
    "Lookup decorator to require the user to be authenticated."
    user = getattr(request, 'user', None)
    if user is not None and user.is_authenticated:
        # Authenticated: return None so the lookup proceeds normally.
        return None
    return HttpResponse(status=401)
|
Lookup decorator to require the user to be authenticated.
|
def loadResults(resultsFile):
    """Parse a results file and return a dict mapping every day in the
    covered date range (as "YYYY-MM-DD" strings) to the list of active
    folders on that day.

    Each input line is expected to look like::

        "C:/path/to/folder", 2020-01-01, 2020-01-05

    Days inside the range with no active folder map to an empty list.
    Blank or malformed lines (e.g. the empty line produced by a
    trailing newline at end of file) are skipped; previously such lines
    raised an IndexError. Returns an empty dict when no valid line is
    found. Also prints a one-line activity summary.
    """
    with open(resultsFile) as f:
        raw = f.read().split("\n")
    foldersByDay = {}
    for line in raw:
        # Skip blank/malformed lines that carry no quoted folder path.
        if line.count('"') < 2:
            continue
        folder = line.split('"')[1] + "\\"
        days = line.split('"')[2].split(", ")
        # days[0] is the leftover text before the first day; skip it.
        for day in days[1:]:
            if day not in foldersByDay:
                foldersByDay[day] = []
            foldersByDay[day] = foldersByDay[day] + [folder]
    if not foldersByDay:
        # Nothing parsed -- avoid IndexError on the min/max lookups.
        return foldersByDay
    nActiveDays = len(foldersByDay)
    dayFirst = sorted(foldersByDay.keys())[0]
    dayLast = sorted(foldersByDay.keys())[-1]
    dayFirst = datetime.datetime.strptime(dayFirst, "%Y-%m-%d")
    dayLast = datetime.datetime.strptime(dayLast, "%Y-%m-%d")
    nDays = (dayLast - dayFirst).days + 1
    # Fill in the gaps so every day in the range has an entry.
    for deltaDays in range(nDays):
        day = dayFirst + datetime.timedelta(days=deltaDays)
        stamp = datetime.datetime.strftime(day, "%Y-%m-%d")
        if stamp not in foldersByDay:
            foldersByDay[stamp] = []
    percActive = nActiveDays / nDays * 100
    print("%d of %d days were active (%.02f%%)" % (nActiveDays, nDays, percActive))
    return foldersByDay
|
returns a dict of active folders with days as keys.
|
def _build(self,
           input_batch,
           is_training,
           test_local_stats=False):
    """Connects the BatchNormV2 module into the graph.

    Args:
      input_batch: A Tensor of the same dimension as `len(data_format)`.
      is_training: A boolean to indicate if the module should be connected in
        training mode, meaning the moving averages are updated. Can be a Tensor.
      test_local_stats: A boolean to indicate if local batch statistics should
        be used when `is_training=False`. If not, moving averages are used.
        By default `False`. Can be a Tensor.

    Returns:
      A tensor with the same shape as `input_batch`.

    Raises:
      base.IncompatibleShapeError: If `data_format` is not valid for the
        input shape.
      base.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`.
    """
    input_shape = input_batch.get_shape()

    # Infer the data format from the input rank when none was given.
    if not self._data_format:
        if len(input_shape) == 2:
            self._data_format = "NC"
        elif len(input_shape) == 3:
            self._data_format = "NWC"
        elif len(input_shape) == 4:
            self._data_format = "NHWC"
        elif len(input_shape) == 5:
            self._data_format = "NDHWC"
        else:
            raise base.IncompatibleShapeError(
                "Input shape {} has too many or too few dimensions.".format(
                    input_shape))

    self._channel_index = self._data_format.index("C")
    # Use list to turn range into iterator in python3.
    self._axis = list(range(len(self._data_format)))
    # Normalization is over every axis except the channel axis.
    del self._axis[self._channel_index]

    if len(self._data_format) != len(input_shape):
        raise base.IncompatibleShapeError(
            "Incorrect data format {} for input shape {}.".format(
                self._data_format, input_shape))

    dtype = input_batch.dtype.base_dtype
    if self._fused and dtype == tf.bfloat16:
        raise base.NotSupportedError(
            "Fused batch norm does not support tf.bfloat16.")

    # Maintain moving averages at a minimum precision of tf.float32.
    stat_dtype = tf.float32 if dtype in [tf.float16, tf.bfloat16] else dtype

    self._num_channels = int(input_shape[self._channel_index])
    if self._channel_index == 1:
        self._image_shape = [int(x) for x in input_shape[2:]]
    else:
        self._image_shape = [int(x) for x in input_shape[1:-1]]

    # Broadcastable shape with 1s everywhere except the channel axis.
    self._expanded_mean_shape = [1] * len(input_shape)
    self._expanded_mean_shape[self._channel_index] = self._num_channels

    # `|` works for both Python bools and boolean Tensors (the docstring
    # allows either for is_training / test_local_stats).
    use_batch_stats = is_training | test_local_stats

    mean, variance = self._build_statistics(input_batch, use_batch_stats,
                                            stat_dtype)

    # Sets up optional gamma and beta parameters
    self._build_scale_offset(dtype)
    # Sets up the batch normalization op.
    out, mean, variance = self._batch_norm_op(input_batch, mean, variance,
                                              use_batch_stats, stat_dtype)
    # Sets up the update op.
    update_ops = self._build_update_ops(mean, variance, is_training)

    # Put update ops in the update ops collection if given, otherwise add as
    # control dependencies of the output.
    if update_ops:
        if self._update_ops_collection:
            for update_op in update_ops:
                tf.add_to_collection(self._update_ops_collection, update_op)
        else:
            with tf.control_dependencies(update_ops):
                out = tf.identity(out)

    return out
|
Connects the BatchNormV2 module into the graph.
Args:
input_batch: A Tensor of the same dimension as `len(data_format)`.
is_training: A boolean to indicate if the module should be connected in
training mode, meaning the moving averages are updated. Can be a Tensor.
test_local_stats: A boolean to indicate if local batch statistics should
be used when `is_training=False`. If not, moving averages are used.
By default `False`. Can be a Tensor.
Returns:
A tensor with the same shape as `input_batch`.
Raises:
base.IncompatibleShapeError: If `data_format` is not valid for the
input shape.
base.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`.
|
def get_proxy_parts(proxy):
    """
    Take a proxy url and break it up to its parts

    Returns a dict with the keys schema, user, password, host and port.
    Components missing from the url are None, except port which
    defaults to '80'. An unparsable url is logged as an error and
    yields all-None parts (plus the default port).
    """
    parts = dict.fromkeys(('schema', 'user', 'password', 'host', 'port'))
    match = re.match(proxy_parts_pattern, proxy)
    if match is None:
        logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
    else:
        groups = match.groupdict()
        for part_name in parts:
            parts[part_name] = groups.get(part_name)
    # Fall back to the default http port when none was captured.
    if parts['port'] is None:
        parts['port'] = '80'
    return parts
|
Take a proxy url and break it up to its parts
|
def find_usage(self):
    """
    Determine the current usage for each limit of this service,
    and update corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    self.connect()
    # Start every limit from a clean slate before re-counting.
    for lim in self.limits.values():
        lim._reset_usage()
    self._find_usage_vpcs()
    # Subnet usage also yields the subnet->AZ mapping that the NAT
    # gateway check below needs.
    subnet_to_az = self._find_usage_subnets()
    self._find_usage_ACLs()
    self._find_usage_route_tables()
    self._find_usage_gateways()
    self._find_usage_nat_gateways(subnet_to_az)
    self._find_usages_vpn_gateways()
    self._find_usage_network_interfaces()
    # Mark that usage data has been populated for this service.
    self._have_usage = True
    logger.debug("Done checking usage.")
|
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
|
def peukerdouglas(np, fel, streamSkeleton, workingdir=None, mpiexedir=None, exedir=None,
                  log_file=None, runtime_file=None, hostfile=None):
    """Run the TauDEM peuker-douglas stream-skeleton function."""
    func_name = TauDEM.func_name('peukerdouglas')
    executable = FileClass.get_executable_fullpath(func_name, exedir)
    in_files = {'-fel': fel}
    out_files = {'-ss': streamSkeleton}
    mpi_args = {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np}
    log_args = {'logfile': log_file, 'runtimefile': runtime_file}
    # Positional layout of TauDEM.run: exe, in files, workdir,
    # in params (none here), out files, mpi settings, log settings.
    return TauDEM.run(executable, in_files, workingdir, None, out_files,
                      mpi_args, log_args)
|
Run peuker-douglas function
|
def process_needlist(app, doctree, fromdocname):
    """
    Replace all needlist nodes with a list of the collected needs.
    Augment each need with a backlink to the original location.
    """
    env = app.builder.env

    for node in doctree.traverse(Needlist):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                node[att] = []
            node.replace_self([])
            continue

        id = node.attributes["ids"][0]
        current_needfilter = env.need_all_needlists[id]
        all_needs = env.needs_all_needs
        content = []
        all_needs = list(all_needs.values())

        # Optional sorting before filtering.
        if current_needfilter["sort_by"] is not None:
            if current_needfilter["sort_by"] == "id":
                all_needs = sorted(all_needs, key=lambda node: node["id"])
            elif current_needfilter["sort_by"] == "status":
                all_needs = sorted(all_needs, key=status_sorter)

        found_needs = procces_filters(all_needs, current_needfilter)

        line_block = nodes.line_block()
        for need_info in found_needs:
            para = nodes.line()
            description = "%s: %s" % (need_info["id"], need_info["title"])

            if current_needfilter["show_status"] and need_info["status"] is not None:
                description += " (%s)" % need_info["status"]

            if current_needfilter["show_tags"] and need_info["tags"] is not None:
                description += " [%s]" % "; ".join(need_info["tags"])

            title = nodes.Text(description, description)

            # Create a reference
            if not need_info["hide"]:
                ref = nodes.reference('', '')
                ref['refdocname'] = need_info['docname']
                ref['refuri'] = app.builder.get_relative_uri(
                    fromdocname, need_info['docname'])
                ref['refuri'] += '#' + need_info['target_node']['refid']
                ref.append(title)
                para += ref
            else:
                # Hidden needs get plain text without a backlink.
                para += title
            line_block.append(para)
        content.append(line_block)

        # NOTE: content always contains line_block at this point (even
        # when no needs were found), so this branch can never fire.
        if len(content) == 0:
            content.append(no_needs_found_paragraph())

        if current_needfilter["show_filters"]:
            content.append(used_filter_paragraph(current_needfilter))

        node.replace_self(content)
|
Replace all needlist nodes with a list of the collected needs.
Augment each need with a backlink to the original location.
|
def generate_exercises_from_importstudioid(self, args, options):
    """
    Create rows in Exercises.csv and ExerciseQuestions.csv from a Studio channel,
    specified based on a studio_id (e.g. studio_id of main_tree for some channel).

    :param args: dict of command-line arguments; reads 'token' (Studio
        API token) and 'importstudioid' (studio_id of the tree).
    :param options: unused here; kept for interface compatibility.
    """
    print('Generating Exercises.csv and ExerciseQuestions.csv from a Studio channel')
    self.studioapi = StudioApi(token=args['token'])
    channel_dict = self.studioapi.get_tree_for_studio_id(args['importstudioid'])
    # Keep a local snapshot of the tree for debugging/inspection.
    json.dump(channel_dict, open('chefdata/studiotree.json', 'w'), indent=4, ensure_ascii=False, sort_keys=True)
    soure_ids_seen = []  # (sic) source ids already handed out in this channel

    def _generate_source_id(subtree):
        """
        Creates a source id from the title and ensures it is unique within the channel.
        """
        candidate = subtree['title'].replace(' ', '_')
        if candidate not in soure_ids_seen:
            source_id = candidate
            soure_ids_seen.append(source_id)
        else:
            # Disambiguate a duplicate title with a node_id prefix.
            source_id = candidate + subtree['node_id'][0:7]
            soure_ids_seen.append(source_id)
        return source_id

    def _write_subtree(path_tuple, subtree, is_root=False):
        # One line of progress output per node, indented by depth.
        print(' '*len(path_tuple) + ' - ', subtree['title'])
        kind = subtree['kind']

        # TOPIC ############################################################
        if kind == 'topic':
            if is_root:
                # The root topic keeps the base path for its children.
                self.write_topic_row_from_studio_dict(path_tuple, subtree, is_root=is_root)
                for child in subtree['children']:
                    _write_subtree(path_tuple, child)
            else:
                self.write_topic_row_from_studio_dict(path_tuple, subtree)
                for child in subtree['children']:
                    _write_subtree(path_tuple+[subtree['title']], child)

        # EXERCISE #########################################################
        elif kind == 'exercise':
            source_id = _generate_source_id(subtree)
            self.write_exercice_row_from_studio_dict(path_tuple, subtree, source_id)
            for question_dict in subtree['assessment_items']:
                self.write_question_row_from_question_dict(source_id, question_dict)

        else:
            # Other content kinds (video, document, ...) are out of scope.
            print('skipping node', subtree['title'])

    path_tuple = [ self.channeldir.split('/')[-1] ]
    _write_subtree(path_tuple, channel_dict, is_root=True)
|
Create rows in Exercises.csv and ExerciseQuestions.csv from a Studio channel,
specified based on a studio_id (e.g. studio_id of main_tree for some channel)'
|
def query_sequence_length(self):
    """Length of the query sequence; does not include hard-clipped bases.

    Prefers the stored sequence; otherwise derives the length from the
    CIGAR operations that consume the query (M, I, S, =, X).
    """
    seq = self.entries.seq
    if seq:
        return len(seq)
    if not self.entries.cigar:
        raise ValueError('Cannot give a query length if no cigar and no query sequence are present')
    query_consuming_ops = ('M', 'I', 'S', '=', 'X')
    total = 0
    for op_len, op_char in self.cigar_array:
        if op_char in query_consuming_ops:
            total += op_len
    return total
|
does not include hard clipped
|
def _iparam_objectname(objectname, arg_name):
    """
    Normalize a class or instance name for use in an intrinsic method call.

    Accepts None, a string class name, a CIMClassName, or a CIMInstanceName
    and returns a CIM object (or None) with host and namespace stripped, as
    required by imethodcall().
    """
    if objectname is None:
        return None
    if isinstance(objectname, (CIMClassName, CIMInstanceName)):
        # Work on a copy so the caller's object is not mutated.
        stripped = objectname.copy()
        stripped.host = None
        stripped.namespace = None
        return stripped
    if isinstance(objectname, six.string_types):
        return CIMClassName(objectname)
    raise TypeError(
        _format("The {0!A} argument of the WBEMConnection operation "
                "has invalid type {1} (must be None, a string, a "
                "CIMClassName, or a CIMInstanceName)",
                arg_name, type(objectname)))
|
Convert an object name (= class or instance name) specified in an
operation method into a CIM object that can be passed to
imethodcall().
|
def loess_inline(h, x, cut, nan=True):
    """Raw, inline version of the loess (tricube-weighted) filtering function.

    Parameters
    ----------
    h : numpy.ndarray
        Series of heights to filter; NaN entries are skipped and remain NaN
        in the output.
    x : numpy.ndarray
        Sample positions corresponding to ``h``.
    cut : float
        Full width of the filter window.
    nan : bool, optional
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    numpy.ndarray
        The filtered series.

    Notes
    -----
    This inline version runs much more slowly than a vectorized filter.
    """
    n = np.size(h)
    # Half-width of the filter window.
    l_c = cut / 2.0
    # Output starts as all-NaN so invalid input points stay NaN.
    # (np.nan replaces np.NaN, which was removed in NumPy 2.0.)
    lf = np.repeat(np.nan, n)
    # Indices of valid (finite) points in the series.
    valid_idx = np.arange(n).compress(~np.isnan(h))
    for icur in valid_idx:
        # Q: distance from the central point in units of the half-window.
        q = np.abs((x - x[icur]) / l_c)
        # Tricube kernel: s = 1 - q^3, zeroed outside the window (q > 1),
        # then weights w = s^3.
        s = 1.0 - q * q * q
        s[q > 1.0] = 0.0
        w = s * s * s
        # Current height: weighted heights divided by the sum of weights
        # (NaN-aware, matching the original nansum behaviour).
        lf[icur] = np.nansum(w * h) / np.nansum(w)
    return lf
|
#This is a raw, inline version of the loess filtering function. Runs much more slowly.
|
def example4():
    """Example 4: Transforming coordinates using affine matrix."""

    def _draw_and_save(img, polygons, filename):
        # Convert each polygon to the int32 (N, 1, 2) layout cv2.polylines expects.
        prepared = [np.array(pts, np.int32).reshape((-1, 1, 2)) for pts in polygons]
        img = cv2.polylines(img, prepared, True, (0, 255, 255), 3)
        # Reverse the channel axis (RGB -> BGR) before writing with OpenCV.
        cv2.imwrite(filename, img[..., ::-1])

    matrix = create_transformation_matrix()
    result = tl.prepro.affine_transform_cv2(image, matrix)  # 76 times faster
    # Keypoint polygons transformed alongside the image.
    keypoints = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]
    keypoints_result = tl.prepro.affine_transform_keypoints(keypoints, matrix)
    _draw_and_save(image, keypoints, '_with_keypoints_origin.png')
    _draw_and_save(result, keypoints_result, '_with_keypoints_result.png')
|
Example 4: Transforming coordinates using affine matrix.
|
def assertDateTimesLagEqual(self, sequence, lag, msg=None):
    '''Fail unless max element in ``sequence`` is separated from
    the present by ``lag`` as determined by the '==' operator.

    If the max element is a datetime, "present" is defined as
    ``datetime.today()``; if the max element is a date, "present"
    is defined as ``date.today()``.

    This is equivalent to
    ``self.assertEqual(present - max(sequence), lag)``.

    Parameters
    ----------
    sequence : iterable
    lag : timedelta
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.

    Raises
    ------
    TypeError
        If ``sequence`` is not iterable.
    TypeError
        If ``lag`` is not a timedelta object.
    TypeError
        If max element in ``sequence`` is not a datetime or date
        object.
    '''
    # collections.Iterable was removed in Python 3.10; the ABC lives in
    # collections.abc.
    from collections.abc import Iterable

    if not isinstance(sequence, Iterable):
        raise TypeError('First argument is not iterable')
    if not isinstance(lag, timedelta):
        raise TypeError('Second argument is not a timedelta object')
    # Compute the max once instead of re-scanning the sequence per check.
    newest = max(sequence)
    # Cannot compare datetime to date, so if dates are provided use
    # date.today(), if datetimes are provided use datetime.today()
    if isinstance(newest, datetime):
        target = datetime.today()
    elif isinstance(newest, date):
        target = date.today()
    else:
        raise TypeError('Expected iterable of datetime or date objects')
    self.assertEqual(target - newest, lag, msg=msg)
|
Fail unless max element in ``sequence`` is separated from
the present by ``lag`` as determined by the '==' operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
|
def main(argv=None):
    """Run Preprocessing as a Dataflow.

    :param argv: command-line arguments; defaults to sys.argv when None.
    """
    args = parse_arguments(sys.argv if argv is None else argv)
    temp_dir = os.path.join(args.output, 'tmp')
    if args.cloud:
        pipeline_name = 'DataflowRunner'
    else:
        pipeline_name = 'DirectRunner'
    # Suppress TF warnings.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    options = {
        'job_name': args.job_name,
        'temp_location': temp_dir,
        'project': args.project_id,
        'setup_file':
            os.path.abspath(os.path.join(
                os.path.dirname(__file__),
                'setup.py')),
    }
    if args.num_workers:
        options['num_workers'] = args.num_workers
    if args.worker_machine_type:
        options['worker_machine_type'] = args.worker_machine_type
    pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)
    p = beam.Pipeline(pipeline_name, options=pipeline_options)
    preprocess(pipeline=p, args=args)
    pipeline_result = p.run()
    # 'async' became a reserved keyword in Python 3.7, so `args.async` is a
    # SyntaxError there; the attribute must be read via getattr.
    run_async = getattr(args, 'async')
    if not run_async:
        pipeline_result.wait_until_finish()
    if run_async and args.cloud:
        print('View job at https://console.developers.google.com/dataflow/job/%s?project=%s' %
              (pipeline_result.job_id(), args.project_id))
|
Run Preprocessing as a Dataflow.
|
def load(self, path):
    """Loads the data from `path` into the catalogue.

    :param path: path to catalogue file
    :type path: `str`
    """
    with open(path, 'r', encoding='utf-8', newline='') as handle:
        rows = csv.DictReader(handle, delimiter=' ',
                              fieldnames=['work', 'label'],
                              skipinitialspace=True)
        for entry in rows:
            work = entry['work']
            label = entry['label']
            # Rows without a label are ignored.
            if not label:
                continue
            if label not in self._ordered_labels:
                self._ordered_labels.append(label)
            # A work may only appear once; a second row is a relabelling.
            if work in self:
                raise MalformedCatalogueError(
                    CATALOGUE_WORK_RELABELLED_ERROR.format(work))
            self[work] = label
|
Loads the data from `path` into the catalogue.
:param path: path to catalogue file
:type path: `str`
|
def pluginSetting(name, namespace=None, typ=None):
    '''
    Returns the value of a plugin setting.

    :param name: the name of the setting. It is not the full path, but just the last name of it
    :param namespace: The namespace. If not passed or None, the namespace will be inferred from
    the caller method. Normally, this should not be passed, since it suffices to let this function
    find out the plugin from where it is being called, and it will automatically use the
    corresponding plugin namespace
    :param typ: optional explicit Python type used when reading the stored value;
    when None, the type is looked up in the cached setting metadata
    '''
    def _find_in_cache(name, key):
        # Look up `key` for the named setting in the module-level settings cache.
        for setting in _settings[namespace]:
            if setting["name"] == name:
                return setting[key]
        return None
    def _type_map(t):
        """Return setting python type"""
        if t == BOOL:
            return bool
        elif t == NUMBER:
            return float
        else:
            # NOTE(review): `unicode` exists only on Python 2 - confirm this
            # module still targets Python 2; on 3.x this branch raises NameError.
            return unicode
    # Default the namespace to the caller's top-level module (plugin) name.
    namespace = namespace or _callerName().split(".")[0]
    full_name = namespace + "/" + name
    if settings.contains(full_name):
        if typ is None:
            typ = _type_map(_find_in_cache(name, 'type'))
        v = settings.value(full_name, None, type=typ)
        # QPyNullVariant is not defined in all Qt bindings; the bare except
        # keeps the lookup working where the name does not exist.
        try:
            if isinstance(v, QPyNullVariant):
                v = None
        except:
            pass
        return v
    else:
        # Setting not stored yet: fall back to the cached default value.
        return _find_in_cache(name, 'default')
|
Returns the value of a plugin setting.
:param name: the name of the setting. It is not the full path, but just the last name of it
:param namespace: The namespace. If not passed or None, the namespace will be inferred from
the caller method. Normally, this should not be passed, since it suffices to let this function
find out the plugin from where it is being called, and it will automatically use the
corresponding plugin namespace
|
def watch_statuses(self, observer, batch_ids):
    """Allows a component to register to be notified when a set of
    batches is no longer PENDING. Expects to be able to call the
    "notify_batches_finished" method on the registered component, sending
    the statuses of the batches.

    Args:
        observer (object): Must implement "notify_batches_finished" method
        batch_ids (list of str): The ids of the batches to watch
    """
    with self._lock:
        current = self.get_statuses(batch_ids)
        # Notify immediately when nothing is pending; otherwise remember the
        # observer and its statuses for a later notification.
        if self._has_no_pendings(current):
            observer.notify_batches_finished(current)
        else:
            self._observers[observer] = current
|
Allows a component to register to be notified when a set of
batches is no longer PENDING. Expects to be able to call the
"notify_batches_finished" method on the registered component, sending
the statuses of the batches.
Args:
observer (object): Must implement "notify_batches_finished" method
batch_ids (list of str): The ids of the batches to watch
|
def _band_calculations(self, rsr, flux, scale, **options):
    """Derive the inband solar flux or inband solar irradiance for a given
    instrument relative spectral response valid for an earth-sun distance
    of one AU.

    rsr: Relative Spectral Response (one detector only)
    Dictionary with two members 'wavelength' and 'response'
    flux: if True, return the inband flux (W/m2); otherwise divide by the
    equivalent band width to get the inband irradiance
    scale: factor applied to the RSR abscissa (unit conversion)

    options:
    detector: Detector number (between 1 and N - N=number of detectors
    for channel)
    """
    from scipy.interpolate import InterpolatedUnivariateSpline
    if 'detector' in options:
        detector = options['detector']
    else:
        detector = 1
    # Resample/Interpolate the response curve:
    if self.wavespace == 'wavelength':
        if 'response' in rsr:
            wvl = rsr['wavelength'] * scale
            resp = rsr['response']
        else:
            # Multi-detector RSR: pick the requested detector's curve.
            wvl = rsr['det-{0:d}'.format(detector)]['wavelength'] * scale
            resp = rsr['det-{0:d}'.format(detector)]['response']
    else:
        # Wavenumber space; same layout as above.
        if 'response' in rsr:
            wvl = rsr['wavenumber'] * scale
            resp = rsr['response']
        else:
            wvl = rsr['det-{0:d}'.format(detector)]['wavenumber'] * scale
            resp = rsr['det-{0:d}'.format(detector)]['response']
    start = wvl[0]
    end = wvl[-1]
    # print "Start and end: ", start, end
    LOG.debug("Begin and end wavelength/wavenumber: %f %f ", start, end)
    dlambda = self._dlambda
    # Evenly spaced abscissa over the band at the configured resolution.
    xspl = np.linspace(start, end, round((end - start) / self._dlambda) + 1)
    ius = InterpolatedUnivariateSpline(wvl, resp)
    resp_ipol = ius(xspl)
    # Interpolate solar spectrum to specified resolution and over specified
    # Spectral interval:
    self.interpolate(dlambda=dlambda, ival_wavelength=(start, end))
    # Mask out outside the response curve:
    maskidx = np.logical_and(np.greater_equal(self.ipol_wavelength, start),
                             np.less_equal(self.ipol_wavelength, end))
    # np.repeat with a boolean repeat-count acts like compress here
    # (True -> keep once, False -> drop), selecting the in-band samples.
    wvl = np.repeat(self.ipol_wavelength, maskidx)
    irr = np.repeat(self.ipol_irradiance, maskidx)
    # Calculate the solar-flux: (w/m2)
    if flux:
        return np.trapz(irr * resp_ipol, wvl)
    else:
        # Divide by the equivalent band width:
        return np.trapz(irr * resp_ipol, wvl) / np.trapz(resp_ipol, wvl)
|
Derive the inband solar flux or inband solar irradiance for a given
instrument relative spectral response valid for an earth-sun distance
of one AU.
rsr: Relative Spectral Response (one detector only)
Dictionary with two members 'wavelength' and 'response'
options:
detector: Detector number (between 1 and N - N=number of detectors
for channel)
|
def render(request, template_name, context=None, content_type=None, status=None, using=None, logs=None):
    """
    Wrapper around Django render method. Can take one or a list of logs and logs the response.
    No overhead if no logs are passed.
    """
    if logs:
        # Normalize a single log into a list so both forms are handled alike.
        log_list = logs if isinstance(logs, list) else [logs]
        obj_logger = ObjectLogger()
        for log_entry in log_list:
            log_entry = obj_logger.log_response(
                log_entry,
                context,
                status=str(status),
                headers='',
                content_type=str(content_type))
            log_entry.save()
    return django_render(
        request,
        template_name,
        context=context,
        content_type=content_type,
        status=status,
        using=using)
|
Wrapper around Django render method. Can take one or a list of logs and logs the response.
No overhead if no logs are passed.
|
def _parse_led(self, keypad, component_xml):
    """Parse an LED component of a keypad into a Led device."""
    comp_number = int(component_xml.get('ComponentNumber'))
    # Keypad LED components are numbered from 81, so subtract the offset
    # to obtain the LED index.
    led_index = comp_number - 80
    return Led(self._lutron, keypad,
               name=('LED %d' % led_index),
               led_num=led_index,
               component_num=comp_number)
|
Parses an LED device that part of a keypad.
|
def do_wait(coro: Callable) -> Any:
    """
    Perform asynchronous operation; await then return the result.

    Runs the coroutine to completion on the thread's current event loop,
    creating and installing a new loop if the thread has none yet.

    :param coro: coroutine to await
    :return: coroutine result
    """
    try:
        event_loop = asyncio.get_event_loop()
    except RuntimeError:
        # No event loop in this thread yet: create one and make it current.
        event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(event_loop)
    return event_loop.run_until_complete(coro)
|
Perform asynchronous operation; await then return the result.
:param coro: coroutine to await
:return: coroutine result
|
def make_all_uppercase(
        lst: Union[list, tuple, str, set]
) -> Union[list, tuple, str, set]:
    """Make all characters uppercase.

    It supports characters in a (mix of) list, tuple, set or string.
    The return value is of the same type of the input value.
    """
    if not isinstance(lst, (list, tuple, str, set)):
        raise TypeError('lst must be a list, a tuple, a set or a string')
    if isinstance(lst, str):
        return lst.upper()
    # Recurse into nested containers/strings; leave other elements untouched.
    converted = [
        Aux.make_all_uppercase(item)
        if isinstance(item, (list, tuple, str, set)) else item
        for item in lst
    ]
    if isinstance(lst, set):
        return set(converted)
    if isinstance(lst, tuple):
        return tuple(converted)
    return converted
|
Make all characters uppercase.
It supports characters in a (mix of) list, tuple, set or string.
The return value is of the same type of the input value.
|
def namedTempFileReader(self) -> NamedTempFileReader:
    """ Named Temporary File Reader

    Provides an object compatible with NamedTemporaryFile for reading this
    file's contents; the file is still deleted once the object falls out of
    scope. Works around the Windows limitation that a NamedTemporaryFile
    cannot be read while it is being written to.
    """
    # Dereference the weak ref to the owning directory and sanity-check it.
    directory = self._directory()
    assert isinstance(directory, Directory), (
        "Expected Directory, receieved %s" % directory)
    return NamedTempFileReader(directory, self)
|
Named Temporary File Reader
This provides an object compatible with NamedTemporaryFile, used for reading this
files contents. This will still delete after the object falls out of scope.
This solves the problem on windows where a NamedTemporaryFile can not be read
while it's being written to
|
def check_data_complete(data, parameter_columns):
    """For any parameters specified with edges, make sure edges
    don't overlap and don't have any gaps. Assumes that edges are
    specified with ends and starts overlapping (but one exclusive and
    the other inclusive) so can check that end of previous == start
    of current.

    If multiple parameters, make sure all combinations of parameters
    are present in data.

    Parameters
    ----------
    data : pandas.DataFrame
        Table containing the start/end columns named by each edge-specified
        parameter (plus any plain parameter columns).
    parameter_columns : list
        Mix of plain column names and (name, start_col, end_col)
        tuples/lists for edge-specified parameters.

    Raises
    ------
    ValueError
        If bins overlap, or a parameter combination is missing.
    NotImplementedError
        If bins are non-continuous (gap between one end and the next start).
    """
    # Use builtin tuple/list: typing.Tuple/typing.List in isinstance() checks
    # are deprecated since Python 3.9 and unnecessary here.
    param_edges = [p[1:] for p in parameter_columns
                   if isinstance(p, (tuple, list))]  # strip out the parameter name
    # check no overlaps/gaps
    for p in param_edges:
        other_params = [p_ed[0] for p_ed in param_edges if p_ed != p]
        if other_params:
            sub_tables = data.groupby(list(other_params))
        else:
            # No other parameters: treat the whole frame as one sub-table.
            sub_tables = {None: data}.items()
        n_p_total = len(set(data[p[0]]))
        for _, table in sub_tables:
            param_data = table[[p[0], p[1]]].copy().sort_values(by=p[0])
            start = param_data[p[0]].reset_index(drop=True)
            end = param_data[p[1]].reset_index(drop=True)
            # Every sub-table must contain every start value seen globally.
            if len(set(start)) < n_p_total:
                raise ValueError(f'You must provide a value for every combination of {parameter_columns}.')
            if len(start) <= 1:
                continue
            # Adjacent bins must satisfy end[i-1] == start[i] exactly.
            for i in range(1, len(start)):
                e = end[i - 1]
                s = start[i]
                if e > s or s == start[i - 1]:
                    raise ValueError(f'Parameter data must not contain overlaps. Parameter {p} '
                                     f'contains overlapping data.')
                if e < s:
                    raise NotImplementedError(f'Interpolation only supported for parameter columns '
                                              f'with continuous bins. Parameter {p} contains '
                                              f'non-continuous bins.')
|
For any parameters specified with edges, make sure edges
don't overlap and don't have any gaps. Assumes that edges are
specified with ends and starts overlapping (but one exclusive and
the other inclusive) so can check that end of previous == start
of current.
If multiple parameters, make sure all combinations of parameters
are present in data.
|
def merge(d1, d2):
    """Merge two dicts, honouring "&" (join) and "-" (remove) key prefixes.

    Example::

        d1 = {"steve": 10, "gary": 4}
        d2 = {"&steve": 11, "-gary": None}
        merge(d1, d2) == {"steve": [10, 11]}

    Keys in ``d2`` prefixed with "&" are joined with the existing value in
    ``d1`` (dict-merge, list-append, or promotion to a two-element list);
    keys prefixed with "-" remove either the whole key (payload None) or one
    matching list element. All other keys replace. Neither input is modified.
    """
    d1, d2 = deepcopy(d1), deepcopy(d2)
    if d1 == {} or type(d1) is not dict:
        return _merge_fix(d2)
    for key in d2.keys():
        # "&:arg" join method. startswith() also guards the empty-string key,
        # which previously raised IndexError on key[0].
        if key.startswith('&'):
            data = d2[key]
            key = key[1:]
            if key in d1:
                if type(d1[key]) is dict and type(data) is dict:
                    d1[key] = merge(d1[key], data)
                elif type(d1[key]) is list:
                    d1[key].append(data)
                else:
                    # Promote the scalar to a two-element list.
                    d1[key] = [d1[key], data]
            else:
                # not found, just add it
                d1[key] = data
        # "-:arg" reduce method
        elif key.startswith('-'):
            data = d2[key]
            key = key[1:]
            if key in d1:
                # simply remove the key
                if data is None:
                    d1.pop(key)
                elif type(d1[key]) is list and data in d1[key]:
                    d1[key].remove(data)
        # standard replace method
        else:
            d1[key] = _merge_fix(d2[key])
    return d1
|
This method does cool stuff like append and replace for dicts
d1 = {
"steve": 10,
"gary": 4
}
d2 = {
"&steve": 11,
"-gary": null
}
result = {
"steve": [10, 11]
}
|
def dump_by_server(self, hosts):
    """Returns the output of dump for each server.

    :param hosts: comma separated lists of members of the ZK ensemble.
    :returns: A dictionary of ((server_ip, port), ClientInfo).
    """
    results = {}
    for endpoint in self._to_endpoints(hosts):
        # A failed command yields an empty dump for that endpoint.
        try:
            results[endpoint] = self.cmd([endpoint], "dump")
        except self.CmdFailed:
            results[endpoint] = ""
    return results
|
Returns the output of dump for each server.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of ((server_ip, port), ClientInfo).
|
def translate_aliases(kwargs, aliases):
    """Given a dict of keyword arguments and a dict mapping aliases to their
    canonical values, canonicalize the keys in the kwargs dict.

    :return: A dict containing all the values in kwargs referenced by their
        canonical key.
    :raises: `AliasException`, if a canonical key is defined more than once.
    """
    canonicalized = {}
    for original_key, value in kwargs.items():
        key = aliases.get(original_key, original_key)
        if key in canonicalized:
            # dupe found: list every kwarg mapping to this canonical key so
            # the error message is helpful.
            duplicates = ', '.join(
                "{}".format(k) for k in kwargs if aliases.get(k) == key
            )
            raise dbt.exceptions.AliasException(
                'Got duplicate keys: ({}) all map to "{}"'
                .format(duplicates, key)
            )
        canonicalized[key] = value
    return canonicalized
|
Given a dict of keyword arguments and a dict mapping aliases to their
canonical values, canonicalize the keys in the kwargs dict.
:return: A dict containing all the values in kwargs referenced by their
canonical key.
:raises: `AliasException`, if a canonical key is defined more than once.
|
def get_clean_factor(factor,
                     forward_returns,
                     groupby=None,
                     binning_by_group=False,
                     quantiles=5,
                     bins=None,
                     groupby_labels=None,
                     max_loss=0.35,
                     zero_aware=False):
    """
    Formats the factor data, forward return data, and group mappings into a
    DataFrame that contains aligned MultiIndex indices of timestamp and asset.
    The returned data will be formatted to be suitable for Alphalens functions.
    It is safe to skip a call to this function and still make use of Alphalens
    functionalities as long as the factor data conforms to the format returned
    from get_clean_factor_and_forward_returns and documented here

    Parameters
    ----------
    factor : pd.Series - MultiIndex
        A MultiIndex Series indexed by timestamp (level 0) and asset
        (level 1), containing the values for a single alpha factor.
        ::
            -----------------------------------
                date    |    asset   |
            -----------------------------------
                        |   AAPL     |   0.5
                        -----------------------
                        |   BA       |  -1.1
                        -----------------------
            2014-01-01  |   CMG      |   1.7
                        -----------------------
                        |   DAL      |  -0.1
                        -----------------------
                        |   LULU     |   2.7
                        -----------------------
    forward_returns : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by timestamp (level 0) and asset
        (level 1), containing the forward returns for assets.
        Forward returns column names must follow the format accepted by
        pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).
        'date' index freq property must be set to a trading calendar
        (pandas DateOffset), see infer_trading_calendar for more details.
        This information is currently used only in cumulative returns
        computation
        ::
            ---------------------------------------
                       |       | 1D  | 5D  | 10D
            ---------------------------------------
                date   | asset |     |     |
            ---------------------------------------
                       | AAPL  | 0.09|-0.01|-0.079
                       ----------------------------
                       | BA    | 0.02| 0.06| 0.020
                       ----------------------------
            2014-01-01 | CMG   | 0.03| 0.09| 0.036
                       ----------------------------
                       | DAL   |-0.02|-0.06|-0.029
                       ----------------------------
                       | LULU  |-0.03| 0.05|-0.009
                       ----------------------------
    groupby : pd.Series - MultiIndex or dict
        Either A MultiIndex Series indexed by date and asset,
        containing the period wise group codes for each asset, or
        a dict of asset to group mappings. If a dict is passed,
        it is assumed that group mappings are unchanged for the
        entire time period of the passed factor data.
    binning_by_group : bool
        If True, compute quantile buckets separately for each group.
        This is useful when the factor values range vary considerably
        across gorups so that it is wise to make the binning group relative.
        You should probably enable this if the factor is intended
        to be analyzed for a group neutral portfolio
    quantiles : int or sequence[float]
        Number of equal-sized quantile buckets to use in factor bucketing.
        Alternately sequence of quantiles, allowing non-equal-sized buckets
        e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
        Only one of 'quantiles' or 'bins' can be not-None
    bins : int or sequence[float]
        Number of equal-width (valuewise) bins to use in factor bucketing.
        Alternately sequence of bin edges allowing for non-uniform bin width
        e.g. [-4, -2, -0.5, 0, 10]
        Chooses the buckets to be evenly spaced according to the values
        themselves. Useful when the factor contains discrete values.
        Only one of 'quantiles' or 'bins' can be not-None
    groupby_labels : dict
        A dictionary keyed by group code with values corresponding
        to the display name for each group.
    max_loss : float, optional
        Maximum percentage (0.00 to 1.00) of factor data dropping allowed,
        computed comparing the number of items in the input factor index and
        the number of items in the output DataFrame index.
        Factor data can be partially dropped due to being flawed itself
        (e.g. NaNs), not having provided enough price data to compute
        forward returns for all factor values, or because it is not possible
        to perform binning.
        Set max_loss=0 to avoid Exceptions suppression.
    zero_aware : bool, optional
        If True, compute quantile buckets separately for positive and negative
        signal values. This is useful if your signal is centered and zero is
        the separation between long and short signals, respectively.
        'quantiles' is None.

    Returns
    -------
    merged_data : pd.DataFrame - MultiIndex
        A MultiIndex Series indexed by date (level 0) and asset (level 1),
        containing the values for a single alpha factor, forward returns for
        each period, the factor quantile/bin that factor value belongs to, and
        (optionally) the group the asset belongs to.
        - forward returns column names follow the format accepted by
          pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc)
        - 'date' index freq property (merged_data.index.levels[0].freq) is the
          same as that of the input forward returns data. This is currently
          used only in cumulative returns computation
        ::
            -------------------------------------------------------------------
                       |       | 1D  | 5D  | 10D  |factor|group|factor_quantile
            -------------------------------------------------------------------
                date   | asset |     |     |      |      |     |
            -------------------------------------------------------------------
                       | AAPL  | 0.09|-0.01|-0.079|  0.5 |  G1 |      3
                       --------------------------------------------------------
                       | BA    | 0.02| 0.06| 0.020| -1.1 |  G2 |      5
                       --------------------------------------------------------
            2014-01-01 | CMG   | 0.03| 0.09| 0.036|  1.7 |  G2 |      1
                       --------------------------------------------------------
                       | DAL   |-0.02|-0.06|-0.029| -0.1 |  G3 |      5
                       --------------------------------------------------------
                       | LULU  |-0.03| 0.05|-0.009|  2.7 |  G1 |      2
                       --------------------------------------------------------
    """
    # Count the input rows so data loss can be reported at the end.
    initial_amount = float(len(factor.index))
    factor_copy = factor.copy()
    # Normalize the index level names expected by downstream Alphalens code.
    factor_copy.index = factor_copy.index.rename(['date', 'asset'])
    merged_data = forward_returns.copy()
    merged_data['factor'] = factor_copy
    if groupby is not None:
        if isinstance(groupby, dict):
            # Static asset->group mapping: every asset must be covered.
            diff = set(factor_copy.index.get_level_values(
                'asset')) - set(groupby.keys())
            if len(diff) > 0:
                raise KeyError(
                    "Assets {} not in group mapping".format(
                        list(diff)))
            ss = pd.Series(groupby)
            # Expand the static mapping onto the full (date, asset) index.
            groupby = pd.Series(index=factor_copy.index,
                                data=ss[factor_copy.index.get_level_values(
                                    'asset')].values)
        if groupby_labels is not None:
            # Replace group codes with their display names.
            diff = set(groupby.values) - set(groupby_labels.keys())
            if len(diff) > 0:
                raise KeyError(
                    "groups {} not in passed group names".format(
                        list(diff)))
            sn = pd.Series(groupby_labels)
            groupby = pd.Series(index=groupby.index,
                                data=sn[groupby.values].values)
        merged_data['group'] = groupby.astype('category')
    # Drop rows lacking a factor value or any forward return.
    merged_data = merged_data.dropna()
    fwdret_amount = float(len(merged_data.index))
    # With max_loss > 0, binning errors are suppressed and counted as loss.
    no_raise = False if max_loss == 0 else True
    quantile_data = quantize_factor(
        merged_data,
        quantiles,
        bins,
        binning_by_group,
        no_raise,
        zero_aware
    )
    merged_data['factor_quantile'] = quantile_data
    # Rows that could not be binned come back as NaN and are dropped too.
    merged_data = merged_data.dropna()
    binning_amount = float(len(merged_data.index))
    tot_loss = (initial_amount - binning_amount) / initial_amount
    fwdret_loss = (initial_amount - fwdret_amount) / initial_amount
    bin_loss = tot_loss - fwdret_loss
    print("Dropped %.1f%% entries from factor data: %.1f%% in forward "
          "returns computation and %.1f%% in binning phase "
          "(set max_loss=0 to see potentially suppressed Exceptions)." %
          (tot_loss * 100, fwdret_loss * 100, bin_loss * 100))
    if tot_loss > max_loss:
        message = ("max_loss (%.1f%%) exceeded %.1f%%, consider increasing it."
                   % (max_loss * 100, tot_loss * 100))
        raise MaxLossExceededError(message)
    else:
        print("max_loss is %.1f%%, not exceeded: OK!" % (max_loss * 100))
    return merged_data
|
Formats the factor data, forward return data, and group mappings into a
DataFrame that contains aligned MultiIndex indices of timestamp and asset.
The returned data will be formatted to be suitable for Alphalens functions.
It is safe to skip a call to this function and still make use of Alphalens
functionalities as long as the factor data conforms to the format returned
from get_clean_factor_and_forward_returns and documented here
Parameters
----------
factor : pd.Series - MultiIndex
A MultiIndex Series indexed by timestamp (level 0) and asset
(level 1), containing the values for a single alpha factor.
::
-----------------------------------
date | asset |
-----------------------------------
| AAPL | 0.5
-----------------------
| BA | -1.1
-----------------------
2014-01-01 | CMG | 1.7
-----------------------
| DAL | -0.1
-----------------------
| LULU | 2.7
-----------------------
forward_returns : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by timestamp (level 0) and asset
(level 1), containing the forward returns for assets.
Forward returns column names must follow the format accepted by
pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).
'date' index freq property must be set to a trading calendar
(pandas DateOffset), see infer_trading_calendar for more details.
This information is currently used only in cumulative returns
computation
::
---------------------------------------
| | 1D | 5D | 10D
---------------------------------------
date | asset | | |
---------------------------------------
| AAPL | 0.09|-0.01|-0.079
----------------------------
| BA | 0.02| 0.06| 0.020
----------------------------
2014-01-01 | CMG | 0.03| 0.09| 0.036
----------------------------
| DAL |-0.02|-0.06|-0.029
----------------------------
| LULU |-0.03| 0.05|-0.009
----------------------------
groupby : pd.Series - MultiIndex or dict
Either A MultiIndex Series indexed by date and asset,
containing the period wise group codes for each asset, or
a dict of asset to group mappings. If a dict is passed,
it is assumed that group mappings are unchanged for the
entire time period of the passed factor data.
binning_by_group : bool
If True, compute quantile buckets separately for each group.
This is useful when the factor values range vary considerably
across gorups so that it is wise to make the binning group relative.
You should probably enable this if the factor is intended
to be analyzed for a group neutral portfolio
quantiles : int or sequence[float]
Number of equal-sized quantile buckets to use in factor bucketing.
Alternately sequence of quantiles, allowing non-equal-sized buckets
e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
Only one of 'quantiles' or 'bins' can be not-None
bins : int or sequence[float]
Number of equal-width (valuewise) bins to use in factor bucketing.
Alternately sequence of bin edges allowing for non-uniform bin width
e.g. [-4, -2, -0.5, 0, 10]
Chooses the buckets to be evenly spaced according to the values
themselves. Useful when the factor contains discrete values.
Only one of 'quantiles' or 'bins' can be not-None
groupby_labels : dict
A dictionary keyed by group code with values corresponding
to the display name for each group.
max_loss : float, optional
Maximum percentage (0.00 to 1.00) of factor data dropping allowed,
computed comparing the number of items in the input factor index and
the number of items in the output DataFrame index.
Factor data can be partially dropped due to being flawed itself
(e.g. NaNs), not having provided enough price data to compute
forward returns for all factor values, or because it is not possible
to perform binning.
Set max_loss=0 to avoid Exceptions suppression.
zero_aware : bool, optional
If True, compute quantile buckets separately for positive and negative
signal values. This is useful if your signal is centered and zero is
the separation between long and short signals, respectively.
'quantiles' is None.
Returns
-------
merged_data : pd.DataFrame - MultiIndex
A MultiIndex Series indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- forward returns column names follow the format accepted by
pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc)
- 'date' index freq property (merged_data.index.levels[0].freq) is the
same as that of the input forward returns data. This is currently
used only in cumulative returns computation
::
-------------------------------------------------------------------
| | 1D | 5D | 10D |factor|group|factor_quantile
-------------------------------------------------------------------
date | asset | | | | | |
-------------------------------------------------------------------
| AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3
--------------------------------------------------------
| BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5
--------------------------------------------------------
2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1
--------------------------------------------------------
| DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5
--------------------------------------------------------
| LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2
--------------------------------------------------------
|
def cvtToBlocks(rh, diskSize):
    """
    Convert a disk storage value to a number of 512-byte blocks.

    Input:
       Request Handle
       Size of disk in bytes (e.g. '100M', '2G', or a plain block count)

    Output:
       Results structure:
          overallRC - Overall return code for the function:
                      0 - Everything went ok
                      4 - Input validation error
          rc        - Return code causing the return.  Same as overallRC.
          rs        - Reason code causing the return.
          errno     - Errno value causing the return.  Always zero.
       Converted value in blocks
    """
    rh.printSysLog("Enter generalUtils.cvtToBlocks")

    results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0}
    blocks = diskSize.strip().upper()
    unit = blocks[-1]

    if unit in ('G', 'M'):
        size = blocks[:-1]
        if not size:
            # No numeric part before the unit suffix -> invalid size.
            rh.printLn("ES", msgs.msg['0200'][1] % (modId, blocks))
            results = msgs.msg['0200'][0]
        else:
            try:
                multiplier = 1024 * 1024 if unit == 'M' else 1024 * 1024 * 1024
                blocks = str(int(math.ceil((float(size) * multiplier) / 512)))
            except Exception:
                # Numeric part could not be converted to a float.
                rh.printLn("ES", msgs.msg['0201'][1] % (modId, size))
                results = msgs.msg['0201'][0]
    elif blocks.strip('1234567890'):
        # No recognized suffix and the value is not a pure integer
        # block count.
        rh.printLn("ES", msgs.msg['0202'][1] % (modId, blocks))
        results = msgs.msg['0202'][0]

    rh.printSysLog("Exit generalUtils.cvtToBlocks, rc: " +
                   str(results['overallRC']))
    return results, blocks
|
Convert a disk storage value to a number of blocks.
Input:
Request Handle
Size of disk in bytes
Output:
Results structure:
overallRC - Overall return code for the function:
0 - Everything went ok
4 - Input validation error
rc - Return code causing the return. Same as overallRC.
rs - Reason code causing the return.
errno - Errno value causing the return. Always zero.
Converted value in blocks
|
def post(self, request, pzone_pk):
    """Add a new operation to the given pzone, return json of the new operation.

    Accepts either a single operation object or a list of them in the
    request body.  Responds 200 with the serialized operation(s), 400 on
    invalid input, and raises Http404 when the pzone does not exist.
    """
    # attempt to get given content list
    pzone = None
    try:
        pzone = PZone.objects.get(pk=pzone_pk)
    except PZone.DoesNotExist:
        raise Http404("Cannot find given pzone.")
    json_obj = []
    # 500 is the fall-through status if the loop below never runs
    # (i.e. the request body decoded to an empty list).
    http_status = 500
    json_op = json.loads(request.body.decode("utf8"))
    # Normalize a single operation into a one-element list.
    if not isinstance(json_op, list):
        json_op = [json_op]
    for data in json_op:
        try:
            serializer = self.get_serializer_class_by_name(data["type_name"])
        except ContentType.DoesNotExist as e:
            json_obj = {"errors": [str(e)]}
            http_status = 400
            break
        serialized = serializer(data=data)
        if serialized.is_valid():
            # object is valid, save it
            serialized.save()
            # set response data
            json_obj.append(serialized.data)
            http_status = 200
        else:
            # object is not valid, return errors in a 400 response
            json_obj = serialized.errors
            http_status = 400
            break
    # Single-item requests get a bare object back, not a 1-element list.
    if http_status == 200 and len(json_obj) == 1:
        json_obj = json_obj[0]
    # cache the time in seconds until the next operation occurs
    # NOTE(review): this filters operations with when <= now (already
    # due) rather than future ones -- confirm the intended direction of
    # the comparison against whatever consumes this cache key.
    next_ops = PZoneOperation.objects.filter(when__lte=timezone.now())
    if len(next_ops) > 0:
        # we have at least one operation, ordered soonest first
        next_op = next_ops[0]
        # cache with expiry number of seconds until op should exec
        cache.set('pzone-operation-expiry-' + pzone.name, next_op.when, 60 * 60 * 5)
    return Response(
        json_obj,
        status=http_status,
        content_type="application/json"
    )
|
Add a new operation to the given pzone, return json of the new operation.
|
def _create(self, format, args):
    """Create a GVariant object from given format and argument list.

    This method recursively calls itself for complex structures (arrays,
    dictionaries, boxed).

    Return a tuple (variant, rest_format, rest_args) with the generated
    GVariant, the remainder of the format string, and the remainder of
    the arguments.

    If args is None, then this won't actually consume any arguments, and
    just parse the format string and generate empty GVariant structures.
    This is required for creating empty dictionaries or arrays.
    """
    first = format[0]
    rest = format[1:]

    # Leaf (simple) types are built directly from their constructor.
    leaf_ctor = self._LEAF_CONSTRUCTORS.get(first)
    if leaf_ctor:
        if args is None:
            # Parse-only mode: consume the format, build nothing.
            return (None, rest, None)
        if not args:
            raise TypeError('not enough arguments for GVariant format string')
        return (leaf_ctor(args[0]), rest, args[1:])

    # Container types dispatch to their specialized builders.
    if first == '(':
        return self._create_tuple(format, args)
    if format.startswith('a{'):
        return self._create_dict(format, args)
    if first == 'a':
        return self._create_array(format, args)
    raise NotImplementedError('cannot handle GVariant type ' + format)
|
Create a GVariant object from given format and argument list.
This method recursively calls itself for complex structures (arrays,
dictionaries, boxed).
Return a tuple (variant, rest_format, rest_args) with the generated
GVariant, the remainder of the format string, and the remainder of the
arguments.
If args is None, then this won't actually consume any arguments, and
just parse the format string and generate empty GVariant structures.
This is required for creating empty dictionaries or arrays.
|
def add_curie(self, name, href):
    """Adds a CURIE definition.

    A CURIE link with the given ``name`` and ``href`` is added to the
    document.

    This method returns self, allowing it to be chained with additional
    method calls.
    """
    # The draft object knows how CURIEs are represented for this
    # document version; delegate the actual registration to it.
    draft = self.draft
    draft.set_curie(self, name, href)
    return self
|
Adds a CURIE definition.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
This method returns self, allowing it to be chained with additional
method calls.
|
def update_metric(self, eval_metric, labels, pre_sliced=False):
    """Evaluates and accumulates evaluation metric on outputs of the last forward computation.

    Subclass should override this method if needed.

    Parameters
    ----------
    eval_metric : EvalMetric
    labels : list of NDArray
        Typically ``data_batch.label``.
    pre_sliced : bool
        Not supported by this module; passing True raises RuntimeError.
    """
    # A module without label shapes has no loss function or
    # predictions, so this call is a no-op.
    if self._label_shapes is None:
        return

    if pre_sliced:
        raise RuntimeError("PythonModule does not support presliced labels")

    # By default the outputs are treated as scores to be evaluated
    # against the provided labels.
    eval_metric.update(labels, self.get_outputs())
|
Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Subclass should override this method if needed.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
|
def search_agents(self, start=0, limit=100, filter=None, **kwargs):
    '''
    search_agents(self, start=0, limit=100, filter=None, **kwargs)

    Search agents

    :Parameters:
    * *start* (`int`) -- start index to retrieve from. Default is 0
    * *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
    * *filter* (`object`) -- free text search pattern (checks in agent data and properties)

    :return: List of search results or empty list

    :Example:
    .. code-block:: python

       filter = {'generic': 'my Agent'}
       search_result = opereto_client.search_agents(filter=filter)

    '''
    # Use None as the default instead of a shared mutable dict
    # (mutable default arguments are shared across calls); an empty
    # filter keeps the original request payload unchanged.
    if filter is None:
        filter = {}
    request_data = {'start': start, 'limit': limit, 'filter': filter}
    # Any extra keyword arguments are forwarded verbatim to the API.
    request_data.update(kwargs)
    return self._call_rest_api('post', '/search/agents', data=request_data, error='Failed to search agents')
|
search_agents(self, start=0, limit=100, filter={}, **kwargs)
Search agents
:Parameters:
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in agent data and properties)
:return: List of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my Agent'}
search_result = opereto_client.search_agents(filter=filter)
|
def from_binary(self, d):
    """Given a binary payload d, update the appropriate payload fields of
    the message.
    """
    parsed = MsgEphemerisGPSDepF._parser.parse(d)
    # Copy every declared slot from the parsed structure onto this
    # message instance.
    for field in self.__class__.__slots__:
        setattr(self, field, getattr(parsed, field))
|
Given a binary payload d, update the appropriate payload fields of
the message.
|
def build_fptree(self, transactions, root_value,
                 root_count, frequent, headers):
    """
    Build the FP tree and return the root node.
    """
    root = FPNode(root_value, root_count, None)

    for transaction in transactions:
        # Keep only the frequent items, ordered most-frequent first.
        items = [item for item in transaction if item in frequent]
        items.sort(key=lambda item: frequent[item], reverse=True)
        if items:
            self.insert_tree(items, root, headers)

    return root
|
Build the FP tree and return the root node.
|
async def set_heater_temp(self, device_id, set_temp):
    """Set heater temp."""
    payload = {
        "homeType": 0,
        "timeZoneNum": "+02:00",
        "deviceId": device_id,
        # The API expects an integer temperature value.
        "value": int(set_temp),
        "key": "holidayTemp",
    }
    await self.request("changeDeviceInfo", payload)
|
Set heater temp.
|
def _fill_function(*args):
    """Fills in the rest of function data into the skeleton function object

    The skeleton itself is created by _make_skel_func().

    Accepts either ``(func, state_dict)`` (current format) or the older
    5- and 6-element positional forms kept for backwards compatibility
    with cloudpickle v0.4.0 / v0.4.1 pickles.
    """
    if len(args) == 2:
        func = args[0]
        state = args[1]
    elif len(args) == 5:
        # Backwards compat for cloudpickle v0.4.0, after which the `module`
        # argument was introduced
        func = args[0]
        keys = ['globals', 'defaults', 'dict', 'closure_values']
        state = dict(zip(keys, args[1:]))
    elif len(args) == 6:
        # Backwards compat for cloudpickle v0.4.1, after which the function
        # state was passed as a dict to the _fill_function it-self.
        func = args[0]
        keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
        state = dict(zip(keys, args[1:]))
    else:
        raise ValueError('Unexpected _fill_value arguments: %r' % (args,))
    # - At pickling time, any dynamic global variable used by func is
    #   serialized by value (in state['globals']).
    # - At unpickling time, func's __globals__ attribute is initialized by
    #   first retrieving an empty isolated namespace that will be shared
    #   with other functions pickled from the same original module
    #   by the same CloudPickler instance and then updated with the
    #   content of state['globals'] to populate the shared isolated
    #   namespace with all the global variables that are specifically
    #   referenced for this function.
    func.__globals__.update(state['globals'])
    func.__defaults__ = state['defaults']
    func.__dict__ = state['dict']
    # Optional attributes: only restore the ones the pickle carried.
    if 'annotations' in state:
        func.__annotations__ = state['annotations']
    if 'doc' in state:
        func.__doc__ = state['doc']
    if 'name' in state:
        func.__name__ = state['name']
    if 'module' in state:
        func.__module__ = state['module']
    if 'qualname' in state:
        func.__qualname__ = state['qualname']
    # Restore captured closure cells, skipping the sentinel that marks
    # cells which were empty at pickling time.
    cells = func.__closure__
    if cells is not None:
        for cell, value in zip(cells, state['closure_values']):
            if value is not _empty_cell_value:
                cell_set(cell, value)
    return func
|
Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func().
|
def absent(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
    '''
    Ensure the range is removed

    Supplying the end of the range is optional.

    State example:

    .. code-block:: yaml

        infoblox_range.absent:
            - name: 'vlan10'

        infoblox_range.absent:
            - name:
            - start_addr: 127.0.1.20
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Fill in any lookup fields that were passed positionally but are
    # missing from the data dict.
    data = data or {}
    data.setdefault('name', name)
    data.setdefault('start_addr', start_addr)
    data.setdefault('end_addr', end_addr)

    # Look the range up by both addresses first, then fall back to
    # matching on just the start address, then just the end address.
    obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts)
    if obj is None:
        obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts)
    if obj is None:
        obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts)

    if not obj:
        ret['result'] = True
        ret['comment'] = 'already deleted'
    elif __opts__['test']:
        # Dry-run mode: report what would happen without deleting.
        ret['result'] = None
        ret['comment'] = 'would attempt to delete range'
    elif __salt__['infoblox.delete_object'](objref=obj['_ref']):
        ret['result'] = True
        ret['changes'] = {'old': 'Found {0} - {1}'.format(start_addr, end_addr),
                          'new': 'Removed'}
    return ret
|
Ensure the range is removed
Supplying the end of the range is optional.
State example:
.. code-block:: yaml
infoblox_range.absent:
- name: 'vlan10'
infoblox_range.absent:
- name:
- start_addr: 127.0.1.20
|
def advisory_lock(dax, key, lock_mode=LockMode.wait, xact=False):
    """A context manager for obtaining a lock, executing code, and then releasing
    the lock.

    A boolean value is passed to the block indicating whether or not the lock was
    obtained.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum. Determines how this function
                operates:
                - wait: the wrapped code will not be executed until the lock
                        is obtained.
                - skip: an attempt will be made to get the lock, and if
                        unsuccessful, False is passed to the code block
                - error: an attempt will be made to get the lock, and if
                         unsuccessful, an exception will be raised.
    :xact: a boolean, if True, the lock will be obtained according to lock_mode,
           but will not be released after the code is executed, since it will be
           automatically released at the end of the transaction.
    """
    acquired = obtain_lock(dax, key, lock_mode, xact)
    # In wait mode the call above blocks until the lock is held, so the
    # return value only matters for the skip/error modes.
    if lock_mode != LockMode.wait and not acquired:
        if lock_mode == LockMode.error:
            raise Exception("Unable to obtain advisory lock {}".format(key))
        # lock_mode is skip: tell the block it did not get the lock.
        yield False
        return

    # At this point we hold the lock.
    try:
        yield True
    finally:
        if not xact:
            # Transaction-scoped locks are released by the database at
            # the end of the transaction, not by us.
            release_lock(dax, key, lock_mode)
|
A context manager for obtaining a lock, executing code, and then releasing
the lock.
A boolean value is passed to the block indicating whether or not the lock was
obtained.
:dax: a DataAccess instance
:key: either a big int or a 2-tuple of integers
:lock_mode: a member of the LockMode enum. Determines how this function
operates:
- wait: the wrapped code will not be executed until the lock
is obtained.
- skip: an attempt will be made to get the lock, and if
unsuccessful, False is passed to the code block
- error: an attempt will be made to get the lock, and if
unsuccessful, an exception will be raised.
:xact: a boolean, if True, the lock will be obtained according to lock_mode,
but will not be released after the code is executed, since it will be
automatically released at the end of the transaction.
|
def _get_samples(n, sim, inc_warmup=True):
    # NOTE: this is in stanfit-class.R in RStan (rather than misc.R)
    """Get chains for `n`th parameter.

    Thin wrapper around the compiled ``pystan._misc.get_samples`` helper.

    Parameters
    ----------
    n : int
        Index of the parameter whose chains are requested.
    sim : dict
        A dictionary tied to a StanFit4Model instance.
    inc_warmup : bool
        Presumably whether warmup draws are included -- confirm against
        ``pystan._misc.get_samples``.

    Returns
    -------
    chains : list of array
        Each chain is an element in the list.
    """
    return pystan._misc.get_samples(n, sim, inc_warmup)
|
Get chains for `n`th parameter.
Parameters
----------
n : int
sim : dict
A dictionary tied to a StanFit4Model instance.
Returns
-------
chains : list of array
Each chain is an element in the list.
|
def presign_v4(method, url, access_key, secret_key, session_token=None,
               region=None, headers=None, expires=None, response_headers=None,
               request_date=None):
    """
    Calculates signature version '4' for regular presigned URLs.

    :param method: Method to be presigned examples 'PUT', 'GET'.
    :param url: URL to be presigned.
    :param access_key: Access key id for your AWS s3 account.
    :param secret_key: Secret access key for your AWS s3 account.
    :param session_token: Session token key set only for temporary
                          access credentials.
    :param region: region of the bucket, it is optional.
    :param headers: any additional HTTP request headers to
                    be presigned, it is optional.
    :param expires: final expiration of the generated URL. Maximum is 7days.
    :param response_headers: Specify additional query string parameters.
    :param request_date: the date of the request.
    """
    # Validate input arguments.
    if not access_key or not secret_key:
        raise InvalidArgumentError('Invalid access_key and secret_key.')
    if region is None:
        region = 'us-east-1'
    if headers is None:
        headers = {}
    if expires is None:
        expires = '604800'  # 7 days in seconds -- the SigV4 maximum.
    if request_date is None:
        request_date = datetime.utcnow()
    parsed_url = urlsplit(url)
    # Presigned URLs are always generated with the unsigned-payload
    # placeholder instead of a real content hash.
    content_hash_hex = _UNSIGNED_PAYLOAD
    host = parsed_url.netloc
    headers['Host'] = host
    iso8601Date = request_date.strftime("%Y%m%dT%H%M%SZ")
    headers_to_sign = headers
    # Construct queries.
    query = {}
    query['X-Amz-Algorithm'] = _SIGN_V4_ALGORITHM
    query['X-Amz-Credential'] = generate_credential_string(access_key,
                                                           request_date,
                                                           region)
    query['X-Amz-Date'] = iso8601Date
    query['X-Amz-Expires'] = str(expires)
    if session_token:
        query['X-Amz-Security-Token'] = session_token
    signed_headers = get_signed_headers(headers_to_sign)
    query['X-Amz-SignedHeaders'] = ';'.join(signed_headers)
    if response_headers is not None:
        query.update(response_headers)
    # URL components.
    url_components = [parsed_url.geturl()]
    # NOTE(review): `query` is always a dict at this point, so this
    # guard can never be False; kept as-is to preserve behavior.
    if query is not None:
        # Query parameters must be sorted to form a canonical request.
        ordered_query = collections.OrderedDict(sorted(query.items()))
        query_components = []
        for component_key in ordered_query:
            single_component = [component_key]
            if ordered_query[component_key] is not None:
                single_component.append('=')
                single_component.append(
                    queryencode(ordered_query[component_key])
                )
            else:
                # None-valued parameters still contribute a bare 'key='.
                single_component.append('=')
            query_components.append(''.join(single_component))
        query_string = '&'.join(query_components)
        if query_string:
            url_components.append('?')
            url_components.append(query_string)
    new_url = ''.join(url_components)
    # new url constructor block ends.
    new_parsed_url = urlsplit(new_url)
    canonical_request = generate_canonical_request(method,
                                                   new_parsed_url,
                                                   headers_to_sign,
                                                   signed_headers,
                                                   content_hash_hex)
    string_to_sign = generate_string_to_sign(request_date, region,
                                             canonical_request)
    signing_key = generate_signing_key(request_date, region, secret_key)
    # The signature is the hex HMAC-SHA256 of the string-to-sign under
    # the date/region-derived signing key.
    signature = hmac.new(signing_key, string_to_sign.encode('utf-8'),
                         hashlib.sha256).hexdigest()
    new_parsed_url = urlsplit(new_url + "&X-Amz-Signature="+signature)
    return new_parsed_url.geturl()
|
Calculates signature version '4' for regular presigned URLs.
:param method: Method to be presigned examples 'PUT', 'GET'.
:param url: URL to be presigned.
:param access_key: Access key id for your AWS s3 account.
:param secret_key: Secret access key for your AWS s3 account.
:param session_token: Session token key set only for temporary
access credentials.
:param region: region of the bucket, it is optional.
:param headers: any additional HTTP request headers to
be presigned, it is optional.
:param expires: final expiration of the generated URL. Maximum is 7days.
:param response_headers: Specify additional query string parameters.
:param request_date: the date of the request.
|
def is_cgi(self):
    """Test whether self.path corresponds to a CGI script.

    Returns True and updates the cgi_info attribute to the tuple
    (dir, rest) if self.path requires running a CGI script.
    Returns False otherwise.

    If any exception is raised, the caller should assume that
    self.path was rejected as invalid and act accordingly.

    The default implementation tests whether the normalized url
    path begins with one of the strings in self.cgi_directories
    (and the next character is a '/' or the end of the string).
    """
    path = _url_collapse_path(self.path)
    # Split at the first '/' after the leading one: head is the
    # candidate CGI directory, tail is the script plus any extra path.
    sep = path.find('/', 1)
    head = path[:sep]
    tail = path[sep + 1:]
    if head not in self.cgi_directories:
        return False
    self.cgi_info = head, tail
    return True
|
Test whether self.path corresponds to a CGI script.
Returns True and updates the cgi_info attribute to the tuple
(dir, rest) if self.path requires running a CGI script.
Returns False otherwise.
If any exception is raised, the caller should assume that
self.path was rejected as invalid and act accordingly.
The default implementation tests whether the normalized url
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string).
|
def peek(self, eof_token=False):
    """Same as :meth:`next`, except the token is not dequeued."""
    if not self.queue:
        # Pull at least one token into the queue before peeking.
        self._refill(eof_token)
    return self.queue[-1]
|
Same as :meth:`next`, except the token is not dequeued.
|
def get_payments_of_credit_note_per_page(self, credit_note_id, per_page=1000, page=1):
    """
    Get payments of credit note per page

    :param credit_note_id: the credit note id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    # The credit note id is passed as a query parameter filter.
    params = {'credit_note_id': credit_note_id}
    return self._get_resource_per_page(
        resource=CREDIT_NOTE_PAYMENTS,
        per_page=per_page,
        page=page,
        params=params,
    )
|
Get payments of credit note per page
:param credit_note_id: the credit note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
|
def _von_mises_cdf_normal(x, concentration, dtype):
    """Computes the von Mises CDF and its derivative via Normal approximation.

    Returns the pair produced by ``value_and_gradient``: the approximate
    CDF value at `x` and its gradient with respect to `concentration`.
    `x` and `concentration` are assumed broadcast-compatible tensors --
    TODO confirm against the caller.
    """
    def cdf_func(concentration):
        """A helper function that is passed to value_and_gradient."""
        # z is an "almost Normally distributed" random variable.
        z = ((np.sqrt(2. / np.pi) / tf.math.bessel_i0e(concentration)) *
             tf.sin(.5 * x))
        # This is the correction described in [1] which reduces the error
        # of the Normal approximation.
        z2 = z ** 2
        z3 = z2 * z
        z4 = z2 ** 2
        c = 24. * concentration
        c1 = 56.
        xi = z - z3 / ((c - 2. * z2 - 16.) / 3. -
                       (z4 + (7. / 4.) * z2 + 167. / 2.) / (c - c1 - z2 + 3.)) ** 2
        # Evaluate the standard Normal CDF at the corrected value.
        distrib = normal.Normal(tf.cast(0., dtype), tf.cast(1., dtype))
        return distrib.cdf(xi)

    return value_and_gradient(cdf_func, concentration)
|
Computes the von Mises CDF and its derivative via Normal approximation.
|
def open_document(self, path, encoding=None, replace_tabs_by_spaces=True,
                  clean_trailing_whitespaces=True, safe_save=True,
                  restore_cursor_position=True, preferred_eol=0,
                  autodetect_eol=True, show_whitespaces=False, **kwargs):
    """
    Opens a document.

    If the path is already open in a tab, that tab is focused and
    returned instead of opening a duplicate.

    :param path: Path of the document to open
    :param encoding: The encoding to use to open the file. Default is
        locale.getpreferredencoding().
    :param replace_tabs_by_spaces: Enable/Disable replace tabs by spaces.
        Default is true.
    :param clean_trailing_whitespaces: Enable/Disable clean trailing
        whitespaces (on save). Default is True.
    :param safe_save: If True, the file is saved to a temporary file first.
        If the save went fine, the temporary file is renamed to the final
        filename.
    :param restore_cursor_position: If true, last cursor position will be
        restored. Default is True.
    :param preferred_eol: Preferred EOL convention. This setting will be
        used for saving the document unless autodetect_eol is True.
    :param autodetect_eol: If true, automatically detects file EOL and
        use it instead of the preferred EOL when saving files.
    :param show_whitespaces: True to show white spaces.
    :param kwargs: additional keyword args to pass to the widget
        constructor.
    :return: The created code editor
    """
    original_path = os.path.normpath(path)
    # normcase makes the duplicate check case-insensitive on platforms
    # with case-insensitive file systems.
    path = os.path.normcase(original_path)
    paths = []
    widgets = []
    for w in self.widgets(include_clones=False):
        if os.path.exists(w.file.path):
            # skip new docs
            widgets.append(w)
            paths.append(os.path.normcase(w.file.path))
    if path in paths:
        # Document already open: focus its tab and return it.
        i = paths.index(path)
        w = widgets[i]
        tw = w.parent_tab_widget
        tw.setCurrentIndex(tw.indexOf(w))
        return w
    else:
        assert os.path.exists(original_path)
        name = os.path.split(original_path)[1]
        # If another open tab has the same file name, disambiguate both
        # tab titles by prefixing them with the parent directory name.
        use_parent_dir = False
        for tab in self.widgets():
            title = QtCore.QFileInfo(tab.file.path).fileName()
            if title == name:
                tw = tab.parent_tab_widget
                new_name = os.path.join(os.path.split(os.path.dirname(
                    tab.file.path))[1], title)
                tw.setTabText(tw.indexOf(tab), new_name)
                use_parent_dir = True
        if use_parent_dir:
            name = os.path.join(
                os.path.split(os.path.dirname(path))[1], name)
            use_parent_dir = False
        tab = self._create_code_edit(self.guess_mimetype(path), **kwargs)
        self.editor_created.emit(tab)
        # Remember the open parameters so the document can later be
        # reloaded with identical settings.
        tab.open_parameters = {
            'encoding': encoding,
            'replace_tabs_by_spaces': replace_tabs_by_spaces,
            'clean_trailing_whitespaces': clean_trailing_whitespaces,
            'safe_save': safe_save,
            'restore_cursor_position': restore_cursor_position,
            'preferred_eol': preferred_eol,
            'autodetect_eol': autodetect_eol,
            'show_whitespaces': show_whitespaces,
            'kwargs': kwargs
        }
        tab.file.clean_trailing_whitespaces = clean_trailing_whitespaces
        tab.file.safe_save = safe_save
        tab.file.restore_cursor = restore_cursor_position
        tab.file.replace_tabs_by_spaces = replace_tabs_by_spaces
        tab.file.autodetect_eol = autodetect_eol
        tab.file.preferred_eol = preferred_eol
        tab.show_whitespaces = show_whitespaces
        try:
            tab.file.open(original_path, encoding=encoding)
        except Exception as e:
            _logger().exception('exception while opening file')
            # Dispose of the half-initialized tab before re-raising.
            tab.close()
            tab.setParent(None)
            tab.deleteLater()
            raise e
        else:
            tab.setDocumentTitle(name)
            tab.file._path = original_path
            icon = self._icon(path)
            self.add_tab(tab, title=name, icon=icon)
            self.document_opened.emit(tab)
            # The file is open again: drop it from the closed-tabs
            # history menu.
            for action in self.closed_tabs_menu.actions():
                if action.toolTip() == original_path:
                    self.closed_tabs_menu.removeAction(action)
                    break
            self.closed_tabs_history_btn.setEnabled(
                len(self.closed_tabs_menu.actions()) > 0)
            return tab
|
Opens a document.
:param path: Path of the document to open
:param encoding: The encoding to use to open the file. Default is
locale.getpreferredencoding().
:param replace_tabs_by_spaces: Enable/Disable replace tabs by spaces.
Default is true.
:param clean_trailing_whitespaces: Enable/Disable clean trailing
whitespaces (on save). Default is True.
:param safe_save: If True, the file is saved to a temporary file first.
If the save went fine, the temporary file is renamed to the final
filename.
:param restore_cursor_position: If true, last cursor position will be
restored. Default is True.
:param preferred_eol: Preferred EOL convention. This setting will be
used for saving the document unless autodetect_eol is True.
:param autodetect_eol: If true, automatically detects file EOL and
use it instead of the preferred EOL when saving files.
:param show_whitespaces: True to show white spaces.
:param kwargs: additional keyword args to pass to the widget
constructor.
:return: The created code editor
|
def ParseFileObject(self, parser_mediator, file_object):
    """Parses an Amcache.hve file for events.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): file-like object.
    """
    regf_file = pyregf.file()  # pylint: disable=no-member
    try:
        regf_file.open_file_object(file_object)
    except IOError:
        # The error is currently ignored -> see TODO above related to the
        # fixing of handling multiple parsers for the same file format.
        return
    root_key = regf_file.get_root_key()
    if root_key is None:
        regf_file.close()
        return
    # Process the "File" subtree: one key per volume, with one subkey
    # per cached file entry.
    root_file_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_FILE_KEY)
    if root_file_key is None:
        regf_file.close()
        return
    for volume_key in root_file_key.sub_keys:
        for am_entry in volume_key.sub_keys:
            self._ProcessAMCacheFileKey(am_entry, parser_mediator)
    # Process the "Programs" subtree: one subkey per program entry.
    root_program_key = root_key.get_sub_key_by_path(
        self._AMCACHE_ROOT_PROGRAM_KEY)
    if root_program_key is None:
        regf_file.close()
        return
    for am_entry in root_program_key.sub_keys:
        self._ProcessAMCacheProgramKey(am_entry, parser_mediator)
    regf_file.close()
|
Parses an Amcache.hve file for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
|
def glob(dirs, patterns):
    """ Returns the list of files matching the given pattern in the
        specified directory.  Both directories and patterns are
        supplied as portable paths. Each pattern should be non-absolute
        path, and can't contain "." or ".." elements. Each slash separated
        element of pattern can contain the following special characters:
        -  '?', which match any character
        -  '*', which matches arbitrary number of characters.
        A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3
        if and only if e1 matches p1, e2 matches p2 and so on.

        For example:
            [ glob . : *.cpp ]
            [ glob . : */build/Jamfile ]
    """
    # Ported from the original bjam 'glob' rule.  The Jam implementation
    # handled directory-containing patterns and explicit ".." elements
    # specially; this port delegates all of that to Python's glob module.
    # Local import: the stdlib module shares this function's name, so a
    # top-level import would be shadowed.  Hoisted out of the loops so it
    # is resolved only once per call.
    import glob as glob_module

    result = []
    dirs = to_seq(dirs)
    patterns = to_seq(patterns)

    # Each entry may itself be a path-separator-joined list of dirs.
    splitdirs = []
    for dir in dirs:
        splitdirs += dir.split(os.pathsep)

    for dir in splitdirs:
        for pattern in patterns:
            result.extend(glob_module.glob(os.path.join(dir, pattern)))
    return result
|
Returns the list of files matching the given pattern in the
specified directory. Both directories and patterns are
supplied as portable paths. Each pattern should be non-absolute
path, and can't contain "." or ".." elements. Each slash separated
element of pattern can contain the following special characters:
- '?', which match any character
- '*', which matches arbitrary number of characters.
A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3
if and only if e1 matches p1, e2 matches p2 and so on.
For example:
[ glob . : *.cpp ]
[ glob . : */build/Jamfile ]
|
def set_data(self, data):
    """Sets this parameter's value on all contexts.

    Parameters
    ----------
    data : NDArray
        The new value; its shape becomes this parameter's shape.
    """
    self.shape = data.shape
    if self._data is None:
        # Initialization was deferred: stash the data so it is applied
        # when the deferred initialization finally runs.
        assert self._deferred_init, \
            "Parameter '%s' has not been initialized"%self.name
        self._deferred_init = self._deferred_init[:3] + (data,)
        return
    # if update_on_kvstore, we need to make sure the copy stored in kvstore is in sync
    if self._trainer and self._trainer._kv_initialized and self._trainer._update_on_kvstore:
        if self not in self._trainer._params_to_init:
            self._trainer._reset_kvstore()
    # In-place copy of the new value into the array on every context.
    for arr in self._check_and_get(self._data, list):
        arr[:] = data
|
Sets this parameter's value on all contexts.
|
async def is_owner(self, user):
    """Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of
    this bot.

    If an :attr:`owner_id` is not set, it is fetched automatically
    through the use of :meth:`~.Bot.application_info`.

    Parameters
    -----------
    user: :class:`.abc.User`
        The user to check for.
    """
    if self.owner_id is not None:
        return user.id == self.owner_id
    # Lazily resolve the owner from the application info and cache it.
    app = await self.application_info()
    self.owner_id = app.owner.id
    return user.id == self.owner_id
|
Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of
this bot.
If an :attr:`owner_id` is not set, it is fetched automatically
through the use of :meth:`~.Bot.application_info`.
Parameters
-----------
user: :class:`.abc.User`
The user to check for.
|
def get_free_gpus(max_procs=0):
    """
    Checks the number of processes running on your GPUs.

    Parameters
    ----------
    max_procs : int
        Maximum number of procs allowed to run on a gpu for it to be considered
        'available'

    Returns
    -------
    availabilities : list(bool)
        List of length N for an N-gpu system. The nth value will be true, if the
        nth gpu had at most max_procs processes running on it. Set to 0 to look
        for gpus with no procs on it.

    Note
    ----
    If function can't query the driver will return an empty list rather than raise an
    Exception.
    """
    # Try connect with NVIDIA drivers
    logger = logging.getLogger(__name__)
    try:
        py3nvml.nvmlInit()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any driver failure means "no GPUs visible".
        str_ = """Couldn't connect to nvml drivers. Check they are installed correctly."""
        warnings.warn(str_, RuntimeWarning)
        # logger.warn is a deprecated alias for logger.warning.
        logger.warning(str_)
        return []

    num_gpus = py3nvml.nvmlDeviceGetCount()
    gpu_free = [False] * num_gpus
    for i in range(num_gpus):
        try:
            h = py3nvml.nvmlDeviceGetHandleByIndex(i)
        except Exception:
            # Leave this GPU marked unavailable if its handle can't be
            # queried.
            continue
        procs = try_get_info(py3nvml.nvmlDeviceGetComputeRunningProcesses, h,
                             ['something'])
        if len(procs) <= max_procs:
            gpu_free[i] = True
    py3nvml.nvmlShutdown()
    return gpu_free
|
Checks the number of processes running on your GPUs.
Parameters
----------
max_procs : int
Maximum number of procs allowed to run on a gpu for it to be considered
'available'
Returns
-------
availabilities : list(bool)
List of length N for an N-gpu system. The nth value will be true, if the
nth gpu had at most max_procs processes running on it. Set to 0 to look
for gpus with no procs on it.
Note
----
If function can't query the driver will return an empty list rather than raise an
Exception.
|
def get_dataset_files(in_path):
    """Gets the files of the given dataset."""
    # Collect audio files for every configured audio extension.
    audio_files = []
    for ext in ds_config.audio_exts:
        pattern = os.path.join(in_path, ds_config.audio_dir, "*" + ext)
        audio_files.extend(glob.glob(pattern))

    # Make sure the output directories exist.
    for subdir in (ds_config.features_dir,
                   ds_config.estimations_dir,
                   ds_config.references_dir):
        utils.ensure_dir(os.path.join(in_path, subdir))

    # Wrap each audio file in a FileStruct, sorted by audio file name.
    return sorted((FileStruct(audio_file) for audio_file in audio_files),
                  key=lambda file_struct: file_struct.audio_file)
|
Gets the files of the given dataset.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.