code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def check_filter(self, args, kwargs):
"""
Calls all filters in the :attr:`_filters` list and if all of them
return :const:`True` will return :const:`True`. If any of the filters
    return :const:`False` will return :const:`False` instead.
This method is equal to the following snippet:
`all(f(*args, **kwargs) for f in self.filters)`
"""
for f in self._filters:
if not f(*args, **kwargs):
return False
return True | Calls all filters in the :attr:`_filters` list and if all of them
return :const:`True` will return :const:`True`. If any of the filters
return :const:`False` will return :const:`False` instead.
This method is equal to the following snippet:
`all(f(*args, **kwargs) for f in self.filters)` | Below is the instruction that describes the task:
### Input:
Calls all filters in the :attr:`_filters` list and if all of them
return :const:`True` will return :const:`True`. If any of the filters
return :const:`False` will return :const:`False` instead.
This method is equal to the following snippet:
`all(f(*args, **kwargs) for f in self.filters)`
### Response:
def check_filter(self, args, kwargs):
"""
Calls all filters in the :attr:`_filters` list and if all of them
return :const:`True` will return :const:`True`. If any of the filters
    return :const:`False` will return :const:`False` instead.
This method is equal to the following snippet:
`all(f(*args, **kwargs) for f in self.filters)`
"""
for f in self._filters:
if not f(*args, **kwargs):
return False
return True |
def render_tag(self, context, kwargs, nodelist):
'''render content with "active" urls logic'''
# load configuration from passed options
self.load_configuration(**kwargs)
# get request from context
request = context['request']
# get full path from request
self.full_path = request.get_full_path()
# render content of template tag
context.push()
content = nodelist.render(context)
context.pop()
# check content for "active" urls
content = render_content(
content,
full_path=self.full_path,
parent_tag=self.parent_tag,
css_class=self.css_class,
menu=self.menu,
ignore_params=self.ignore_params,
)
    return content | render content with "active" urls logic | Below is the instruction that describes the task:
### Input:
render content with "active" urls logic
### Response:
def render_tag(self, context, kwargs, nodelist):
'''render content with "active" urls logic'''
# load configuration from passed options
self.load_configuration(**kwargs)
# get request from context
request = context['request']
# get full path from request
self.full_path = request.get_full_path()
# render content of template tag
context.push()
content = nodelist.render(context)
context.pop()
# check content for "active" urls
content = render_content(
content,
full_path=self.full_path,
parent_tag=self.parent_tag,
css_class=self.css_class,
menu=self.menu,
ignore_params=self.ignore_params,
)
return content |
def is_callable(val):
"""
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a callable. Otherwise False.
"""
# python 3.x with x <= 2 does not support callable(), apparently
if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
return hasattr(val, '__call__')
else:
return callable(val) | Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
    True if the variable is a callable. Otherwise False. | Below is the instruction that describes the task:
### Input:
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a callable. Otherwise False.
### Response:
def is_callable(val):
"""
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a callable. Otherwise False.
"""
# python 3.x with x <= 2 does not support callable(), apparently
if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
return hasattr(val, '__call__')
else:
return callable(val) |
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding level
color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) + colors[i])
self._cl = np.vstack(level_color) | compute LineVisual color from level index and corresponding level
color | Below is the instruction that describes the task:
### Input:
compute LineVisual color from level index and corresponding level
color
### Response:
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding level
color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) + colors[i])
self._cl = np.vstack(level_color) |
def install(self, plugin, name=None, **opts):
"""Install plugin to the application."""
source = plugin
if isinstance(plugin, str):
module, _, attr = plugin.partition(':')
module = import_module(module)
plugin = getattr(module, attr or 'Plugin', None)
if isinstance(plugin, types.ModuleType):
plugin = getattr(module, 'Plugin', None)
if plugin is None:
raise MuffinException('Plugin is not found %r' % source)
name = name or plugin.name
if name in self.ps:
raise MuffinException('Plugin with name `%s` is already intalled.' % name)
if isinstance(plugin, type):
plugin = plugin(**opts)
if hasattr(plugin, 'setup'):
plugin.setup(self)
if hasattr(plugin, 'middleware') and plugin.middleware not in self.middlewares:
self.middlewares.append(plugin.middleware)
if hasattr(plugin, 'startup'):
self.on_startup.append(plugin.startup)
if hasattr(plugin, 'cleanup'):
self.on_cleanup.append(plugin.cleanup)
# Save plugin links
self.ps[name] = plugin
    return plugin | Install plugin to the application. | Below is the instruction that describes the task:
### Input:
Install plugin to the application.
### Response:
def install(self, plugin, name=None, **opts):
"""Install plugin to the application."""
source = plugin
if isinstance(plugin, str):
module, _, attr = plugin.partition(':')
module = import_module(module)
plugin = getattr(module, attr or 'Plugin', None)
if isinstance(plugin, types.ModuleType):
plugin = getattr(module, 'Plugin', None)
if plugin is None:
raise MuffinException('Plugin is not found %r' % source)
name = name or plugin.name
if name in self.ps:
raise MuffinException('Plugin with name `%s` is already intalled.' % name)
if isinstance(plugin, type):
plugin = plugin(**opts)
if hasattr(plugin, 'setup'):
plugin.setup(self)
if hasattr(plugin, 'middleware') and plugin.middleware not in self.middlewares:
self.middlewares.append(plugin.middleware)
if hasattr(plugin, 'startup'):
self.on_startup.append(plugin.startup)
if hasattr(plugin, 'cleanup'):
self.on_cleanup.append(plugin.cleanup)
# Save plugin links
self.ps[name] = plugin
return plugin |
def update_done(self, *args, **kwargs):
"""Clear out the previous update"""
kwargs['state'] = 'done'
self.update(*args, **kwargs)
    self.rec = None | Clear out the previous update | Below is the instruction that describes the task:
### Input:
Clear out the previous update
### Response:
def update_done(self, *args, **kwargs):
"""Clear out the previous update"""
kwargs['state'] = 'done'
self.update(*args, **kwargs)
self.rec = None |
def closeGlyphsOverGSUB(gsub, glyphs):
""" Use the FontTools subsetter to perform a closure over the GSUB table
given the initial `glyphs` (set of glyph names, str). Update the set
in-place adding all the glyph names that can be reached via GSUB
substitutions from this initial set.
"""
subsetter = subset.Subsetter()
subsetter.glyphs = glyphs
gsub.closure_glyphs(subsetter) | Use the FontTools subsetter to perform a closure over the GSUB table
given the initial `glyphs` (set of glyph names, str). Update the set
in-place adding all the glyph names that can be reached via GSUB
    substitutions from this initial set. | Below is the instruction that describes the task:
### Input:
Use the FontTools subsetter to perform a closure over the GSUB table
given the initial `glyphs` (set of glyph names, str). Update the set
in-place adding all the glyph names that can be reached via GSUB
substitutions from this initial set.
### Response:
def closeGlyphsOverGSUB(gsub, glyphs):
""" Use the FontTools subsetter to perform a closure over the GSUB table
given the initial `glyphs` (set of glyph names, str). Update the set
in-place adding all the glyph names that can be reached via GSUB
substitutions from this initial set.
"""
subsetter = subset.Subsetter()
subsetter.glyphs = glyphs
gsub.closure_glyphs(subsetter) |
def call(self, service, method, *args, **kwargs):
"""Make a SoftLayer API call.
:param method: the method to call on the service
:param \\*args: (optional) arguments for the remote call
:param id: (optional) id for the resource
:param mask: (optional) object mask
:param dict filter: (optional) filter dict
:param dict headers: (optional) optional XML-RPC headers
:param boolean compress: (optional) Enable/Disable HTTP compression
:param dict raw_headers: (optional) HTTP transport headers
:param int limit: (optional) return at most this many results
:param int offset: (optional) offset results by this many
:param boolean iter: (optional) if True, returns a generator with the
results
:param bool verify: verify SSL cert
:param cert: client certificate path
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> client.call('Account', 'getVirtualGuests', mask="id", limit=10)
[...]
"""
if kwargs.pop('iter', False):
# Most of the codebase assumes a non-generator will be returned, so casting to list
# keeps those sections working
return list(self.iter_call(service, method, *args, **kwargs))
invalid_kwargs = set(kwargs.keys()) - VALID_CALL_ARGS
if invalid_kwargs:
raise TypeError(
'Invalid keyword arguments: %s' % ','.join(invalid_kwargs))
if self._prefix and not service.startswith(self._prefix):
service = self._prefix + service
http_headers = {'Accept': '*/*'}
if kwargs.get('compress', True):
http_headers['Accept-Encoding'] = 'gzip, deflate, compress'
else:
http_headers['Accept-Encoding'] = None
if kwargs.get('raw_headers'):
http_headers.update(kwargs.get('raw_headers'))
request = transports.Request()
request.service = service
request.method = method
request.args = args
request.transport_headers = http_headers
request.identifier = kwargs.get('id')
request.mask = kwargs.get('mask')
request.filter = kwargs.get('filter')
request.limit = kwargs.get('limit')
request.offset = kwargs.get('offset')
if kwargs.get('verify') is not None:
request.verify = kwargs.get('verify')
if self.auth:
extra_headers = self.auth.get_headers()
if extra_headers:
warnings.warn("auth.get_headers() is deprecated and will be "
"removed in the next major version",
DeprecationWarning)
request.headers.update(extra_headers)
request = self.auth.get_request(request)
request.headers.update(kwargs.get('headers', {}))
return self.transport(request) | Make a SoftLayer API call.
:param method: the method to call on the service
:param \\*args: (optional) arguments for the remote call
:param id: (optional) id for the resource
:param mask: (optional) object mask
:param dict filter: (optional) filter dict
:param dict headers: (optional) optional XML-RPC headers
:param boolean compress: (optional) Enable/Disable HTTP compression
:param dict raw_headers: (optional) HTTP transport headers
:param int limit: (optional) return at most this many results
:param int offset: (optional) offset results by this many
:param boolean iter: (optional) if True, returns a generator with the
results
:param bool verify: verify SSL cert
:param cert: client certificate path
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> client.call('Account', 'getVirtualGuests', mask="id", limit=10)
    [...] | Below is the instruction that describes the task:
### Input:
Make a SoftLayer API call.
:param method: the method to call on the service
:param \\*args: (optional) arguments for the remote call
:param id: (optional) id for the resource
:param mask: (optional) object mask
:param dict filter: (optional) filter dict
:param dict headers: (optional) optional XML-RPC headers
:param boolean compress: (optional) Enable/Disable HTTP compression
:param dict raw_headers: (optional) HTTP transport headers
:param int limit: (optional) return at most this many results
:param int offset: (optional) offset results by this many
:param boolean iter: (optional) if True, returns a generator with the
results
:param bool verify: verify SSL cert
:param cert: client certificate path
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> client.call('Account', 'getVirtualGuests', mask="id", limit=10)
[...]
### Response:
def call(self, service, method, *args, **kwargs):
"""Make a SoftLayer API call.
:param method: the method to call on the service
:param \\*args: (optional) arguments for the remote call
:param id: (optional) id for the resource
:param mask: (optional) object mask
:param dict filter: (optional) filter dict
:param dict headers: (optional) optional XML-RPC headers
:param boolean compress: (optional) Enable/Disable HTTP compression
:param dict raw_headers: (optional) HTTP transport headers
:param int limit: (optional) return at most this many results
:param int offset: (optional) offset results by this many
:param boolean iter: (optional) if True, returns a generator with the
results
:param bool verify: verify SSL cert
:param cert: client certificate path
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> client.call('Account', 'getVirtualGuests', mask="id", limit=10)
[...]
"""
if kwargs.pop('iter', False):
# Most of the codebase assumes a non-generator will be returned, so casting to list
# keeps those sections working
return list(self.iter_call(service, method, *args, **kwargs))
invalid_kwargs = set(kwargs.keys()) - VALID_CALL_ARGS
if invalid_kwargs:
raise TypeError(
'Invalid keyword arguments: %s' % ','.join(invalid_kwargs))
if self._prefix and not service.startswith(self._prefix):
service = self._prefix + service
http_headers = {'Accept': '*/*'}
if kwargs.get('compress', True):
http_headers['Accept-Encoding'] = 'gzip, deflate, compress'
else:
http_headers['Accept-Encoding'] = None
if kwargs.get('raw_headers'):
http_headers.update(kwargs.get('raw_headers'))
request = transports.Request()
request.service = service
request.method = method
request.args = args
request.transport_headers = http_headers
request.identifier = kwargs.get('id')
request.mask = kwargs.get('mask')
request.filter = kwargs.get('filter')
request.limit = kwargs.get('limit')
request.offset = kwargs.get('offset')
if kwargs.get('verify') is not None:
request.verify = kwargs.get('verify')
if self.auth:
extra_headers = self.auth.get_headers()
if extra_headers:
warnings.warn("auth.get_headers() is deprecated and will be "
"removed in the next major version",
DeprecationWarning)
request.headers.update(extra_headers)
request = self.auth.get_request(request)
request.headers.update(kwargs.get('headers', {}))
return self.transport(request) |
def includes(self, lo_freq: float) -> bool:
"""Whether `lo_freq` is within the `LoRange`.
Args:
lo_freq: LO frequency to be checked
Returns:
bool: True if lo_freq is included in this range, otherwise False
"""
if self._lb <= lo_freq <= self._ub:
return True
return False | Whether `lo_freq` is within the `LoRange`.
Args:
lo_freq: LO frequency to be checked
Returns:
        bool: True if lo_freq is included in this range, otherwise False | Below is the instruction that describes the task:
### Input:
Whether `lo_freq` is within the `LoRange`.
Args:
lo_freq: LO frequency to be checked
Returns:
bool: True if lo_freq is included in this range, otherwise False
### Response:
def includes(self, lo_freq: float) -> bool:
"""Whether `lo_freq` is within the `LoRange`.
Args:
lo_freq: LO frequency to be checked
Returns:
bool: True if lo_freq is included in this range, otherwise False
"""
if self._lb <= lo_freq <= self._ub:
return True
return False |
def _init_settings(self):
""" Init setting """
self._show_whitespaces = False
self._tab_length = 4
self._use_spaces_instead_of_tabs = True
self.setTabStopWidth(self._tab_length *
self.fontMetrics().width(" "))
    self._set_whitespaces_flags(self._show_whitespaces) | Init setting | Below is the instruction that describes the task:
### Input:
Init setting
### Response:
def _init_settings(self):
""" Init setting """
self._show_whitespaces = False
self._tab_length = 4
self._use_spaces_instead_of_tabs = True
self.setTabStopWidth(self._tab_length *
self.fontMetrics().width(" "))
self._set_whitespaces_flags(self._show_whitespaces) |
def find_path(self, test_function=None, on_targets=False):
"""
General helper method that iterates breadth-first over the referential_domain's
cells and returns a path where the test_function is True
"""
assert self.has_referential_domain(), "need context set"
if not test_function:
test_function = lambda x, y: True
def find_path_inner(part, prefix):
name, structure = part
if test_function(name, structure):
yield prefix + [name]
if isinstance(structure, DictCell):
for sub_structure in structure:
for prefix2 in find_path_inner(sub_structure,\
prefix[:] + [name]):
yield prefix2
prefix = []
if on_targets:
# apply search to the first target
results = []
for _, instance in self.iter_singleton_referents():
for part in instance:
for entry in find_path_inner(part, prefix[:]):
results.append(['target'] + entry)
while results:
yield results.pop()
break # only use first instance
else:
# apply search to self
for part in self:
for entry in find_path_inner(part, prefix[:]):
yield entry | General helper method that iterates breadth-first over the referential_domain's
cells and returns a path where the test_function is True | Below is the instruction that describes the task:
### Input:
General helper method that iterates breadth-first over the referential_domain's
cells and returns a path where the test_function is True
### Response:
def find_path(self, test_function=None, on_targets=False):
"""
General helper method that iterates breadth-first over the referential_domain's
cells and returns a path where the test_function is True
"""
assert self.has_referential_domain(), "need context set"
if not test_function:
test_function = lambda x, y: True
def find_path_inner(part, prefix):
name, structure = part
if test_function(name, structure):
yield prefix + [name]
if isinstance(structure, DictCell):
for sub_structure in structure:
for prefix2 in find_path_inner(sub_structure,\
prefix[:] + [name]):
yield prefix2
prefix = []
if on_targets:
# apply search to the first target
results = []
for _, instance in self.iter_singleton_referents():
for part in instance:
for entry in find_path_inner(part, prefix[:]):
results.append(['target'] + entry)
while results:
yield results.pop()
break # only use first instance
else:
# apply search to self
for part in self:
for entry in find_path_inner(part, prefix[:]):
yield entry |
def ttl(self):
"""LeaseTimeToLive retrieves lease information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return:
"""
result = self.client.post(self.client.get_url("/kv/lease/timetolive"),
json={"ID": self.id})
return int(result['TTL']) | LeaseTimeToLive retrieves lease information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
    :return: | Below is the instruction that describes the task:
### Input:
LeaseTimeToLive retrieves lease information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return:
### Response:
def ttl(self):
"""LeaseTimeToLive retrieves lease information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return:
"""
result = self.client.post(self.client.get_url("/kv/lease/timetolive"),
json={"ID": self.id})
return int(result['TTL']) |
def detach_user_policy(policy_name, user_name,
region=None, key=None, keyid=None, profile=None):
'''
    Detach a managed policy from a user.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.detach_user_policy mypolicy myuser
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.detach_user_policy(policy_arn, user_name)
log.info('Detached %s policy from IAM user %s.', policy_name, user_name)
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to detach %s policy from IAM user %s.', policy_name, user_name)
return False
    return True | Detach a managed policy from a user.
CLI Example:
.. code-block:: bash
    salt myminion boto_iam.detach_user_policy mypolicy myuser | Below is the instruction that describes the task:
### Input:
Detach a managed policy from a user.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.detach_user_policy mypolicy myuser
### Response:
def detach_user_policy(policy_name, user_name,
region=None, key=None, keyid=None, profile=None):
'''
    Detach a managed policy from a user.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.detach_user_policy mypolicy myuser
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.detach_user_policy(policy_arn, user_name)
log.info('Detached %s policy from IAM user %s.', policy_name, user_name)
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to detach %s policy from IAM user %s.', policy_name, user_name)
return False
return True |
def get_available_networks(self, **kwargs):
"""
Retrieves the list of Ethernet networks, Fibre Channel networks and network sets that are available
to a server profile template along with their respective ports. The scopeUris, serverHardwareTypeUri and
enclosureGroupUri parameters should be specified to get the available networks for a new server profile template.
The serverHardwareTypeUri, enclosureGroupUri, and profileTemplateUri should be specified to get available
networks for an existing server profile template.
The scopeUris parameter is ignored when the profileTemplateUri is specified.
Args:
enclosureGroupUri: The URI of the enclosure group is required when the serverHardwareTypeUri
specifies a blade server.
profileTemplateUri: If the URI of the server profile template is provided the list of available
networks will include only networks that share a scope with the server profile template.
scopeUris: An expression to restrict the resources returned according to the scopes
to which they are assigned.
serverHardwareTypeUri: If the server hardware type specifies a rack server, the list of
available network includes all networks that are applicable for the specified server hardware type.
If the server hardware type specifies a blade server, the enclosureGroupUri parameter must be
specified, and the list of available networks includes all networks that are applicable for the
specified server hardware type and all empty bays within the enclosure group that can support
the specified server hardware type.
view: The FunctionType (Ethernet or FibreChannel) to filter the list of networks returned.
Returns:
dict: Dictionary with available networks details.
"""
query_string = '&'.join('{}={}'.format(key, value)
for key, value in kwargs.items() if value)
uri = self.URI + "{}?{}".format("/available-networks", query_string)
return self._helper.do_get(uri) | Retrieves the list of Ethernet networks, Fibre Channel networks and network sets that are available
to a server profile template along with their respective ports. The scopeUris, serverHardwareTypeUri and
enclosureGroupUri parameters should be specified to get the available networks for a new server profile template.
The serverHardwareTypeUri, enclosureGroupUri, and profileTemplateUri should be specified to get available
networks for an existing server profile template.
The scopeUris parameter is ignored when the profileTemplateUri is specified.
Args:
enclosureGroupUri: The URI of the enclosure group is required when the serverHardwareTypeUri
specifies a blade server.
profileTemplateUri: If the URI of the server profile template is provided the list of available
networks will include only networks that share a scope with the server profile template.
scopeUris: An expression to restrict the resources returned according to the scopes
to which they are assigned.
serverHardwareTypeUri: If the server hardware type specifies a rack server, the list of
available network includes all networks that are applicable for the specified server hardware type.
If the server hardware type specifies a blade server, the enclosureGroupUri parameter must be
specified, and the list of available networks includes all networks that are applicable for the
specified server hardware type and all empty bays within the enclosure group that can support
the specified server hardware type.
view: The FunctionType (Ethernet or FibreChannel) to filter the list of networks returned.
Returns:
    dict: Dictionary with available networks details. | Below is the instruction that describes the task:
### Input:
Retrieves the list of Ethernet networks, Fibre Channel networks and network sets that are available
to a server profile template along with their respective ports. The scopeUris, serverHardwareTypeUri and
enclosureGroupUri parameters should be specified to get the available networks for a new server profile template.
The serverHardwareTypeUri, enclosureGroupUri, and profileTemplateUri should be specified to get available
networks for an existing server profile template.
The scopeUris parameter is ignored when the profileTemplateUri is specified.
Args:
enclosureGroupUri: The URI of the enclosure group is required when the serverHardwareTypeUri
specifies a blade server.
profileTemplateUri: If the URI of the server profile template is provided the list of available
networks will include only networks that share a scope with the server profile template.
scopeUris: An expression to restrict the resources returned according to the scopes
to which they are assigned.
serverHardwareTypeUri: If the server hardware type specifies a rack server, the list of
available network includes all networks that are applicable for the specified server hardware type.
If the server hardware type specifies a blade server, the enclosureGroupUri parameter must be
specified, and the list of available networks includes all networks that are applicable for the
specified server hardware type and all empty bays within the enclosure group that can support
the specified server hardware type.
view: The FunctionType (Ethernet or FibreChannel) to filter the list of networks returned.
Returns:
dict: Dictionary with available networks details.
### Response:
def get_available_networks(self, **kwargs):
"""
Retrieves the list of Ethernet networks, Fibre Channel networks and network sets that are available
to a server profile template along with their respective ports. The scopeUris, serverHardwareTypeUri and
enclosureGroupUri parameters should be specified to get the available networks for a new server profile template.
The serverHardwareTypeUri, enclosureGroupUri, and profileTemplateUri should be specified to get available
networks for an existing server profile template.
The scopeUris parameter is ignored when the profileTemplateUri is specified.
Args:
enclosureGroupUri: The URI of the enclosure group is required when the serverHardwareTypeUri
specifies a blade server.
profileTemplateUri: If the URI of the server profile template is provided the list of available
networks will include only networks that share a scope with the server profile template.
scopeUris: An expression to restrict the resources returned according to the scopes
to which they are assigned.
serverHardwareTypeUri: If the server hardware type specifies a rack server, the list of
available network includes all networks that are applicable for the specified server hardware type.
If the server hardware type specifies a blade server, the enclosureGroupUri parameter must be
specified, and the list of available networks includes all networks that are applicable for the
specified server hardware type and all empty bays within the enclosure group that can support
the specified server hardware type.
view: The FunctionType (Ethernet or FibreChannel) to filter the list of networks returned.
Returns:
dict: Dictionary with available networks details.
"""
query_string = '&'.join('{}={}'.format(key, value)
for key, value in kwargs.items() if value)
uri = self.URI + "{}?{}".format("/available-networks", query_string)
return self._helper.do_get(uri) |
def update_product_set(
self,
product_set,
location=None,
product_set_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator`
"""
client = self.get_conn()
product_set = self.product_set_name_determiner.get_entity_with_name(
product_set, product_set_id, location, project_id
)
self.log.info('Updating ProductSet: %s', product_set.name)
response = client.update_product_set(
product_set=product_set, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('ProductSet updated: %s', response.name if response else '')
self.log.debug('ProductSet updated:\n%s', response)
return MessageToDict(response) | For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator` | Below is the the instruction that describes the task:
### Input:
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator`
### Response:
def update_product_set(
self,
product_set,
location=None,
product_set_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator`
"""
client = self.get_conn()
product_set = self.product_set_name_determiner.get_entity_with_name(
product_set, product_set_id, location, project_id
)
self.log.info('Updating ProductSet: %s', product_set.name)
response = client.update_product_set(
product_set=product_set, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('ProductSet updated: %s', response.name if response else '')
self.log.debug('ProductSet updated:\n%s', response)
return MessageToDict(response) |
def updates(self, offset=None):
"""Fetch the messages that a bot can read.
When the `offset` is given it will retrieve all the messages
that are greater or equal to that offset. Take into account
that, due to how the API works, all previous messages will
be removed from the server.
:param offset: fetch the messages starting on this offset
"""
params = {}
if offset:
params[self.OFFSET] = offset
response = self._call(self.UPDATES_METHOD, params)
return response | Fetch the messages that a bot can read.
When the `offset` is given it will retrieve all the messages
that are greater or equal to that offset. Take into account
that, due to how the API works, all previous messages will
be removed from the server.
    :param offset: fetch the messages starting on this offset | Below is the instruction that describes the task:
### Input:
Fetch the messages that a bot can read.
When the `offset` is given it will retrieve all the messages
that are greater or equal to that offset. Take into account
that, due to how the API works, all previous messages will
be removed from the server.
:param offset: fetch the messages starting on this offset
### Response:
def updates(self, offset=None):
"""Fetch the messages that a bot can read.
When the `offset` is given it will retrieve all the messages
that are greater or equal to that offset. Take into account
that, due to how the API works, all previous messages will
be removed from the server.
:param offset: fetch the messages starting on this offset
"""
params = {}
if offset:
params[self.OFFSET] = offset
response = self._call(self.UPDATES_METHOD, params)
return response |
def list(self):
"""
Get all current labels
:return: The Logentries API response
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
"""
return self._post(
request='list',
uri=ApiUri.TAGS.value,
).get('tags') | Get all current labels
:return: The Logentries API response
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries | Below is the instruction that describes the task:
### Input:
Get all current labels
:return: The Logentries API response
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
### Response:
def list(self):
"""
Get all current labels
:return: The Logentries API response
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
"""
return self._post(
request='list',
uri=ApiUri.TAGS.value,
).get('tags') |
def run_sweep(
self,
program: Union[circuits.Circuit, schedules.Schedule],
params: study.Sweepable,
repetitions: int = 1,
) -> List[study.TrialResult]:
"""Runs the supplied Circuit or Schedule, mimicking quantum hardware.
In contrast to run, this allows for sweeping over different parameter
values.
Args:
program: The circuit or schedule to simulate.
params: Parameters to run with the program.
repetitions: The number of repetitions to simulate.
Returns:
TrialResult list for this run; one for each possible parameter
resolver.
"""
circuit = (program if isinstance(program, circuits.Circuit)
else program.to_circuit())
param_resolvers = study.to_resolvers(params)
trial_results = [] # type: List[study.TrialResult]
for param_resolver in param_resolvers:
measurements = self._run(circuit=circuit,
param_resolver=param_resolver,
repetitions=repetitions)
trial_results.append(study.TrialResult(params=param_resolver,
repetitions=repetitions,
measurements=measurements))
return trial_results | Runs the supplied Circuit or Schedule, mimicking quantum hardware.
In contrast to run, this allows for sweeping over different parameter
values.
Args:
program: The circuit or schedule to simulate.
params: Parameters to run with the program.
repetitions: The number of repetitions to simulate.
Returns:
TrialResult list for this run; one for each possible parameter
resolver. | Below is the instruction that describes the task:
### Input:
Runs the supplied Circuit or Schedule, mimicking quantum hardware.
In contrast to run, this allows for sweeping over different parameter
values.
Args:
program: The circuit or schedule to simulate.
params: Parameters to run with the program.
repetitions: The number of repetitions to simulate.
Returns:
TrialResult list for this run; one for each possible parameter
resolver.
### Response:
def run_sweep(
self,
program: Union[circuits.Circuit, schedules.Schedule],
params: study.Sweepable,
repetitions: int = 1,
) -> List[study.TrialResult]:
"""Runs the supplied Circuit or Schedule, mimicking quantum hardware.
In contrast to run, this allows for sweeping over different parameter
values.
Args:
program: The circuit or schedule to simulate.
params: Parameters to run with the program.
repetitions: The number of repetitions to simulate.
Returns:
TrialResult list for this run; one for each possible parameter
resolver.
"""
circuit = (program if isinstance(program, circuits.Circuit)
else program.to_circuit())
param_resolvers = study.to_resolvers(params)
trial_results = [] # type: List[study.TrialResult]
for param_resolver in param_resolvers:
measurements = self._run(circuit=circuit,
param_resolver=param_resolver,
repetitions=repetitions)
trial_results.append(study.TrialResult(params=param_resolver,
repetitions=repetitions,
measurements=measurements))
return trial_results |
def previous_day(self):
"""Return the HDate for the previous day."""
return HDate(self.gdate + datetime.timedelta(-1), self.diaspora,
self.hebrew) | Return the HDate for the previous day. | Below is the instruction that describes the task:
### Input:
Return the HDate for the previous day.
### Response:
def previous_day(self):
"""Return the HDate for the previous day."""
return HDate(self.gdate + datetime.timedelta(-1), self.diaspora,
self.hebrew) |
def _construct_node_from_actions(self,
current_node: Tree,
remaining_actions: List[List[str]]) -> List[List[str]]:
"""
Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned.
"""
if not remaining_actions:
logger.error("No actions left to construct current node: %s", current_node)
raise ParsingError("Incomplete action sequence")
left_side, right_side = remaining_actions.pop(0)
if left_side != current_node.label():
logger.error("Current node: %s", current_node)
logger.error("Next action: %s -> %s", left_side, right_side)
logger.error("Remaining actions were: %s", remaining_actions)
raise ParsingError("Current node does not match next action")
if right_side[0] == '[':
# This is a non-terminal expansion, with more than one child node.
for child_type in right_side[1:-1].split(', '):
child_node = Tree(child_type, [])
current_node.append(child_node) # you add a child to an nltk.Tree with `append`
# For now, we assume that all children in a list like this are non-terminals, so we
# recurse on them. I'm pretty sure that will always be true for the way our
# grammar induction works. We can revisit this later if we need to.
remaining_actions = self._construct_node_from_actions(child_node, remaining_actions)
else:
# The current node is a pre-terminal; we'll add a single terminal child. By
# construction, the right-hand side of our production rules are only ever terminal
# productions or lists of non-terminals.
current_node.append(Tree(right_side, [])) # you add a child to an nltk.Tree with `append`
return remaining_actions | Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned. | Below is the instruction that describes the task:
### Input:
Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned.
### Response:
def _construct_node_from_actions(self,
current_node: Tree,
remaining_actions: List[List[str]]) -> List[List[str]]:
"""
Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned.
"""
if not remaining_actions:
logger.error("No actions left to construct current node: %s", current_node)
raise ParsingError("Incomplete action sequence")
left_side, right_side = remaining_actions.pop(0)
if left_side != current_node.label():
logger.error("Current node: %s", current_node)
logger.error("Next action: %s -> %s", left_side, right_side)
logger.error("Remaining actions were: %s", remaining_actions)
raise ParsingError("Current node does not match next action")
if right_side[0] == '[':
# This is a non-terminal expansion, with more than one child node.
for child_type in right_side[1:-1].split(', '):
child_node = Tree(child_type, [])
current_node.append(child_node) # you add a child to an nltk.Tree with `append`
# For now, we assume that all children in a list like this are non-terminals, so we
# recurse on them. I'm pretty sure that will always be true for the way our
# grammar induction works. We can revisit this later if we need to.
remaining_actions = self._construct_node_from_actions(child_node, remaining_actions)
else:
# The current node is a pre-terminal; we'll add a single terminal child. By
# construction, the right-hand side of our production rules are only ever terminal
# productions or lists of non-terminals.
current_node.append(Tree(right_side, [])) # you add a child to an nltk.Tree with `append`
return remaining_actions |
def SymmetricDifference(self, scriptnames):
'''Takes in a set, list, or tuple scriptnames and returns the symmetric difference (as a list)
of scriptnames and the stored names.'''
scriptnames = set(scriptnames)
myscripts = set(self.scripts.keys())
return list(scriptnames.difference(myscripts).union(myscripts.difference(scriptnames))) | Takes in a set, list, or tuple scriptnames and returns the symmetric difference (as a list)
of scriptnames and the stored names. | Below is the instruction that describes the task:
### Input:
Takes in a set, list, or tuple scriptnames and returns the symmetric difference (as a list)
of scriptnames and the stored names.
### Response:
def SymmetricDifference(self, scriptnames):
'''Takes in a set, list, or tuple scriptnames and returns the symmetric difference (as a list)
of scriptnames and the stored names.'''
scriptnames = set(scriptnames)
myscripts = set(self.scripts.keys())
return list(scriptnames.difference(myscripts).union(myscripts.difference(scriptnames))) |
def update_local_repo(self, force=False):
"""Given a remote path (e.g. github.com/gamechanger/gclib), pull the latest
commits from master to bring the local copy up to date."""
self.ensure_local_repo()
logging.info('Updating local repo {}'.format(self.remote_path))
managed_repo = git.Repo(self.managed_path)
with git_error_handling():
managed_repo.remote().pull('master')
log_to_client('Updated managed copy of {}'.format(self.remote_path))
if not self.local_is_up_to_date():
if force:
with git_error_handling():
managed_repo.git.reset('--hard', 'origin/master')
else:
log_to_client('WARNING: couldn\'t update {} because of local conflicts. '
'A container may have modified files in the repos\'s directory. '
'Your code generally shouldn\'t be manipulating the contents of your repo folder - '
'please fix this and run `dusty up`'.format(self.managed_path)) | Given a remote path (e.g. github.com/gamechanger/gclib), pull the latest
commits from master to bring the local copy up to date. | Below is the instruction that describes the task:
### Input:
Given a remote path (e.g. github.com/gamechanger/gclib), pull the latest
commits from master to bring the local copy up to date.
### Response:
def update_local_repo(self, force=False):
"""Given a remote path (e.g. github.com/gamechanger/gclib), pull the latest
commits from master to bring the local copy up to date."""
self.ensure_local_repo()
logging.info('Updating local repo {}'.format(self.remote_path))
managed_repo = git.Repo(self.managed_path)
with git_error_handling():
managed_repo.remote().pull('master')
log_to_client('Updated managed copy of {}'.format(self.remote_path))
if not self.local_is_up_to_date():
if force:
with git_error_handling():
managed_repo.git.reset('--hard', 'origin/master')
else:
log_to_client('WARNING: couldn\'t update {} because of local conflicts. '
'A container may have modified files in the repos\'s directory. '
'Your code generally shouldn\'t be manipulating the contents of your repo folder - '
'please fix this and run `dusty up`'.format(self.managed_path)) |
def _number_negative_start_handler(c, ctx):
"""Handles numeric values that start with a negative sign. Branches to delegate co-routines according to
_NEGATIVE_TABLE.
"""
assert c == _MINUS
assert len(ctx.value) == 0
ctx.set_ion_type(IonType.INT)
ctx.value.append(c)
c, _ = yield
yield ctx.immediate_transition(_NEGATIVE_TABLE[c](c, ctx)) | Handles numeric values that start with a negative sign. Branches to delegate co-routines according to
_NEGATIVE_TABLE. | Below is the instruction that describes the task:
### Input:
Handles numeric values that start with a negative sign. Branches to delegate co-routines according to
_NEGATIVE_TABLE.
### Response:
def _number_negative_start_handler(c, ctx):
"""Handles numeric values that start with a negative sign. Branches to delegate co-routines according to
_NEGATIVE_TABLE.
"""
assert c == _MINUS
assert len(ctx.value) == 0
ctx.set_ion_type(IonType.INT)
ctx.value.append(c)
c, _ = yield
yield ctx.immediate_transition(_NEGATIVE_TABLE[c](c, ctx)) |
def from_timestamp_pb(cls, stamp):
"""Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp message
"""
microseconds = int(stamp.seconds * 1e6)
bare = from_microseconds(microseconds)
return cls(
bare.year,
bare.month,
bare.day,
bare.hour,
bare.minute,
bare.second,
nanosecond=stamp.nanos,
tzinfo=pytz.UTC,
) | Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp message | Below is the instruction that describes the task:
### Input:
Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp message
### Response:
def from_timestamp_pb(cls, stamp):
"""Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp message
"""
microseconds = int(stamp.seconds * 1e6)
bare = from_microseconds(microseconds)
return cls(
bare.year,
bare.month,
bare.day,
bare.hour,
bare.minute,
bare.second,
nanosecond=stamp.nanos,
tzinfo=pytz.UTC,
) |
def projects_from_metadata(metadata):
"""Extract the project dependencies from a metadata spec."""
projects = []
for data in metadata:
meta = distlib.metadata.Metadata(fileobj=io.StringIO(data))
projects.extend(pypi.just_name(project) for project in meta.run_requires)
return frozenset(map(packaging.utils.canonicalize_name, projects)) | Extract the project dependencies from a metadata spec. | Below is the instruction that describes the task:
### Input:
Extract the project dependencies from a metadata spec.
### Response:
def projects_from_metadata(metadata):
"""Extract the project dependencies from a metadata spec."""
projects = []
for data in metadata:
meta = distlib.metadata.Metadata(fileobj=io.StringIO(data))
projects.extend(pypi.just_name(project) for project in meta.run_requires)
return frozenset(map(packaging.utils.canonicalize_name, projects)) |
def PathCollection(mode="agg", *args, **kwargs):
"""
mode: string
- "raw" (speed: fastest, size: small, output: ugly, no dash,
no thickness)
- "agg" (speed: medium, size: medium output: nice, some flaws, no dash)
- "agg+" (speed: slow, size: big, output: perfect, no dash)
"""
if mode == "raw":
return RawPathCollection(*args, **kwargs)
elif mode == "agg+":
return AggPathCollection(*args, **kwargs)
return AggFastPathCollection(*args, **kwargs) | mode: string
- "raw" (speed: fastest, size: small, output: ugly, no dash,
no thickness)
- "agg" (speed: medium, size: medium output: nice, some flaws, no dash)
- "agg+" (speed: slow, size: big, output: perfect, no dash) | Below is the the instruction that describes the task:
### Input:
mode: string
- "raw" (speed: fastest, size: small, output: ugly, no dash,
no thickness)
- "agg" (speed: medium, size: medium output: nice, some flaws, no dash)
- "agg+" (speed: slow, size: big, output: perfect, no dash)
### Response:
def PathCollection(mode="agg", *args, **kwargs):
"""
mode: string
- "raw" (speed: fastest, size: small, output: ugly, no dash,
no thickness)
- "agg" (speed: medium, size: medium output: nice, some flaws, no dash)
- "agg+" (speed: slow, size: big, output: perfect, no dash)
"""
if mode == "raw":
return RawPathCollection(*args, **kwargs)
elif mode == "agg+":
return AggPathCollection(*args, **kwargs)
return AggFastPathCollection(*args, **kwargs) |
def get_next_assessment_part_id(self, assessment_part_id=None):
"""This supports the basic simple sequence case. Can be overridden in a record for other cases"""
if assessment_part_id is None:
part_id = self.get_id()
else:
part_id = assessment_part_id
return get_next_part_id(part_id,
runtime=self._runtime,
proxy=self._proxy,
sequestered=True)[0] | This supports the basic simple sequence case. Can be overridden in a record for other cases | Below is the instruction that describes the task:
### Input:
This supports the basic simple sequence case. Can be overridden in a record for other cases
### Response:
def get_next_assessment_part_id(self, assessment_part_id=None):
"""This supports the basic simple sequence case. Can be overridden in a record for other cases"""
if assessment_part_id is None:
part_id = self.get_id()
else:
part_id = assessment_part_id
return get_next_part_id(part_id,
runtime=self._runtime,
proxy=self._proxy,
sequestered=True)[0]
def gen_challenge(self, state):
"""This function generates a challenge for given state. It selects a
random number and sets that as the challenge key. By default, v_max
is set to the prime, and the number of chunks to challenge is the
number of chunks in the file. (this doesn't guarantee that the whole
file will be checked since some chunks could be selected twice and
some selected none.
:param state: the state to use. it can be encrypted, as it will
have just been received from the server
"""
state.decrypt(self.key)
chal = Challenge(state.chunks, self.prime, Random.new().read(32))
return chal | This function generates a challenge for given state. It selects a
random number and sets that as the challenge key. By default, v_max
is set to the prime, and the number of chunks to challenge is the
number of chunks in the file. (this doesn't guarantee that the whole
file will be checked since some chunks could be selected twice and
some selected none.
:param state: the state to use. it can be encrypted, as it will
have just been received from the server | Below is the instruction that describes the task:
### Input:
This function generates a challenge for given state. It selects a
random number and sets that as the challenge key. By default, v_max
is set to the prime, and the number of chunks to challenge is the
number of chunks in the file. (this doesn't guarantee that the whole
file will be checked since some chunks could be selected twice and
some selected none.
:param state: the state to use. it can be encrypted, as it will
have just been received from the server
### Response:
def gen_challenge(self, state):
"""This function generates a challenge for given state. It selects a
random number and sets that as the challenge key. By default, v_max
is set to the prime, and the number of chunks to challenge is the
number of chunks in the file. (this doesn't guarantee that the whole
file will be checked since some chunks could be selected twice and
some selected none.
:param state: the state to use. it can be encrypted, as it will
have just been received from the server
"""
state.decrypt(self.key)
chal = Challenge(state.chunks, self.prime, Random.new().read(32))
return chal |
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
FakePathSpec: a path specification.
"""
location = getattr(self.path_spec, 'location', None)
if location is not None:
paths = self._file_system.GetPaths()
for path in iter(paths.keys()):
# Determine if the start of the path is similar to the location string.
# If not the file the path refers to is not in the same directory.
if not path or not path.startswith(location):
continue
_, suffix = self._file_system.GetPathSegmentAndSuffix(location, path)
# Ignore anything that is part of a sub directory or the directory
# itself.
if suffix or path == location:
continue
path_spec_location = self._file_system.JoinPath([path])
yield fake_path_spec.FakePathSpec(location=path_spec_location) | Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
FakePathSpec: a path specification. | Below is the instruction that describes the task:
### Input:
Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
FakePathSpec: a path specification.
### Response:
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
FakePathSpec: a path specification.
"""
location = getattr(self.path_spec, 'location', None)
if location is not None:
paths = self._file_system.GetPaths()
for path in iter(paths.keys()):
# Determine if the start of the path is similar to the location string.
# If not the file the path refers to is not in the same directory.
if not path or not path.startswith(location):
continue
_, suffix = self._file_system.GetPathSegmentAndSuffix(location, path)
# Ignore anything that is part of a sub directory or the directory
# itself.
if suffix or path == location:
continue
path_spec_location = self._file_system.JoinPath([path])
yield fake_path_spec.FakePathSpec(location=path_spec_location) |
def _exclude_ss_bonded_cysteines(self):
"""
Pre-compute ss bonds to discard cystines for H-adding.
"""
ss_bonds = self.nh_structure.search_ss_bonds()
for cys_pair in ss_bonds:
cys1, cys2 = cys_pair
cys1.resname = 'CYX'
cys2.resname = 'CYX' | Pre-compute ss bonds to discard cystines for H-adding. | Below is the instruction that describes the task:
### Input:
Pre-compute ss bonds to discard cystines for H-adding.
### Response:
def _exclude_ss_bonded_cysteines(self):
"""
Pre-compute ss bonds to discard cystines for H-adding.
"""
ss_bonds = self.nh_structure.search_ss_bonds()
for cys_pair in ss_bonds:
cys1, cys2 = cys_pair
cys1.resname = 'CYX'
cys2.resname = 'CYX' |
def call_sphinx(out_type, build_dir = "build"):
"""
Call the ``sphinx-build`` for the given output type and the ``make`` when
the target has this possibility.
Parameters
----------
out_type :
A builder name for ``sphinx-build``. See the full list at
`<http://sphinx-doc.org/invocation.html>`_.
build_dir :
Directory for storing the output. Defaults to "build".
"""
sphinx_string = sphinx_template.format(build_dir=build_dir,
out_type=out_type)
if sphinx.main(shlex.split(sphinx_string)) != 0:
raise RuntimeError("Something went wrong while building '{0}'"
.format(out_type))
if out_type in make_target:
make_string = make_template.format(build_dir=build_dir,
out_type=out_type,
make_param=make_target[out_type])
call(shlex.split(make_string)) | Call the ``sphinx-build`` for the given output type and the ``make`` when
the target has this possibility.
Parameters
----------
out_type :
A builder name for ``sphinx-build``. See the full list at
`<http://sphinx-doc.org/invocation.html>`_.
build_dir :
Directory for storing the output. Defaults to "build". | Below is the instruction that describes the task:
### Input:
Call the ``sphinx-build`` for the given output type and the ``make`` when
the target has this possibility.
Parameters
----------
out_type :
A builder name for ``sphinx-build``. See the full list at
`<http://sphinx-doc.org/invocation.html>`_.
build_dir :
Directory for storing the output. Defaults to "build".
### Response:
def call_sphinx(out_type, build_dir = "build"):
"""
Call the ``sphinx-build`` for the given output type and the ``make`` when
the target has this possibility.
Parameters
----------
out_type :
A builder name for ``sphinx-build``. See the full list at
`<http://sphinx-doc.org/invocation.html>`_.
build_dir :
Directory for storing the output. Defaults to "build".
"""
sphinx_string = sphinx_template.format(build_dir=build_dir,
out_type=out_type)
if sphinx.main(shlex.split(sphinx_string)) != 0:
raise RuntimeError("Something went wrong while building '{0}'"
.format(out_type))
if out_type in make_target:
make_string = make_template.format(build_dir=build_dir,
out_type=out_type,
make_param=make_target[out_type])
call(shlex.split(make_string)) |
def randomizeSequence(sequence, symbolsPerSequence, numColumns, sparsity, p = 0.25):
"""
Takes a sequence as input and randomizes a percentage p of it by choosing
SDRs at random while preserving the remaining invariant.
@param sequence (array) sequence to be randomized
@param symbolsPerSequence (int) number of symbols per sequence
@param numColumns (int) number of columns in the TM
@param sparsity (float) percentage of sparsity
@p (float) percentage of symbols to be replaced
@return randomizedSequence (array) sequence that contains p percentage of new SDRs
"""
randomizedSequence = []
sparseCols = int(numColumns * sparsity)
numSymbolsToChange = int(symbolsPerSequence * p)
symIndices = np.random.permutation(np.arange(symbolsPerSequence))
for symbol in range(symbolsPerSequence):
randomizedSequence.append(sequence[symbol])
i = 0
while numSymbolsToChange > 0:
randomizedSequence[symIndices[i]] = generateRandomSymbol(numColumns, sparseCols)
i += 1
numSymbolsToChange -= 1
return randomizedSequence | Takes a sequence as input and randomizes a percentage p of it by choosing
SDRs at random while preserving the remaining invariant.
@param sequence (array) sequence to be randomized
@param symbolsPerSequence (int) number of symbols per sequence
@param numColumns (int) number of columns in the TM
@param sparsity (float) percentage of sparsity
@p (float) percentage of symbols to be replaced
@return randomizedSequence (array) sequence that contains p percentage of new SDRs | Below is the instruction that describes the task:
### Input:
Takes a sequence as input and randomizes a percentage p of it by choosing
SDRs at random while preserving the remaining invariant.
@param sequence (array) sequence to be randomized
@param symbolsPerSequence (int) number of symbols per sequence
@param numColumns (int) number of columns in the TM
@param sparsity (float) percentage of sparsity
@p (float) percentage of symbols to be replaced
@return randomizedSequence (array) sequence that contains p percentage of new SDRs
### Response:
def randomizeSequence(sequence, symbolsPerSequence, numColumns, sparsity, p = 0.25):
"""
Takes a sequence as input and randomizes a percentage p of it by choosing
SDRs at random while preserving the remaining invariant.
@param sequence (array) sequence to be randomized
@param symbolsPerSequence (int) number of symbols per sequence
@param numColumns (int) number of columns in the TM
@param sparsity (float) percentage of sparsity
@p (float) percentage of symbols to be replaced
@return randomizedSequence (array) sequence that contains p percentage of new SDRs
"""
randomizedSequence = []
sparseCols = int(numColumns * sparsity)
numSymbolsToChange = int(symbolsPerSequence * p)
symIndices = np.random.permutation(np.arange(symbolsPerSequence))
for symbol in range(symbolsPerSequence):
randomizedSequence.append(sequence[symbol])
i = 0
while numSymbolsToChange > 0:
randomizedSequence[symIndices[i]] = generateRandomSymbol(numColumns, sparseCols)
i += 1
numSymbolsToChange -= 1
return randomizedSequence |
def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]):
"""
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
"""
result_path = self.meta_data['results_resource']
conn = Qubole.agent()
include_header = "false"
if len(arguments) == 1:
include_header = arguments.pop(0)
if include_header not in ('true', 'false'):
raise ParseError("incude_header can be either true or false")
r = conn.get(result_path, {'inline': inline, 'include_headers': include_header})
if r.get('inline'):
raw_results = r['results']
encoded_results = raw_results.encode('utf8')
if sys.version_info < (3, 0, 0):
fp.write(encoded_results)
else:
import io
if isinstance(fp, io.TextIOBase):
if hasattr(fp, 'buffer'):
fp.buffer.write(encoded_results)
else:
fp.write(raw_results)
elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
fp.write(encoded_results)
else:
# Can this happen? Don't know what's the right thing to do in this case.
pass
else:
if fetch:
storage_credentials = conn.get(Account.credentials_rest_entity_path)
if storage_credentials['region_endpoint'] is not None:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token = storage_credentials['session_token'],
host = storage_credentials['region_endpoint'])
else:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token=storage_credentials['session_token'])
log.info("Starting download from result locations: [%s]" % ",".join(r['result_location']))
#fetch latest value of num_result_dir
num_result_dir = Command.find(self.id).num_result_dir
# If column/header names are not able to fetch then use include header as true
if include_header.lower() == "true" and qlog is not None:
write_headers(qlog, fp)
for s3_path in r['result_location']:
# In Python 3,
# If the delim is None, fp should be in binary mode because
# boto expects it to be.
# If the delim is not None, then both text and binary modes
# work.
_download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)
else:
fp.write(",".join(r['result_location'])) | Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3 | Below is the the instruction that describes the task:
### Input:
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
### Response:
def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]):
"""
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
"""
result_path = self.meta_data['results_resource']
conn = Qubole.agent()
include_header = "false"
if len(arguments) == 1:
include_header = arguments.pop(0)
if include_header not in ('true', 'false'):
raise ParseError("incude_header can be either true or false")
r = conn.get(result_path, {'inline': inline, 'include_headers': include_header})
if r.get('inline'):
raw_results = r['results']
encoded_results = raw_results.encode('utf8')
if sys.version_info < (3, 0, 0):
fp.write(encoded_results)
else:
import io
if isinstance(fp, io.TextIOBase):
if hasattr(fp, 'buffer'):
fp.buffer.write(encoded_results)
else:
fp.write(raw_results)
elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
fp.write(encoded_results)
else:
# Can this happen? Don't know what's the right thing to do in this case.
pass
else:
if fetch:
storage_credentials = conn.get(Account.credentials_rest_entity_path)
if storage_credentials['region_endpoint'] is not None:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token = storage_credentials['session_token'],
host = storage_credentials['region_endpoint'])
else:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token=storage_credentials['session_token'])
log.info("Starting download from result locations: [%s]" % ",".join(r['result_location']))
#fetch latest value of num_result_dir
num_result_dir = Command.find(self.id).num_result_dir
# If column/header names are not able to fetch then use include header as true
if include_header.lower() == "true" and qlog is not None:
write_headers(qlog, fp)
for s3_path in r['result_location']:
# In Python 3,
# If the delim is None, fp should be in binary mode because
# boto expects it to be.
# If the delim is not None, then both text and binary modes
# work.
_download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)
else:
fp.write(",".join(r['result_location'])) |
def ssh_sa_ssh_client_key_exchange(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
client = ET.SubElement(ssh, "client")
key_exchange = ET.SubElement(client, "key-exchange")
key_exchange.text = kwargs.pop('key_exchange')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ssh_sa_ssh_client_key_exchange(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
client = ET.SubElement(ssh, "client")
key_exchange = ET.SubElement(client, "key-exchange")
key_exchange.text = kwargs.pop('key_exchange')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _raiseValidationException(standardExcMsg, customExcMsg=None):
"""Raise ValidationException with standardExcMsg, unless customExcMsg is specified."""
if customExcMsg is None:
raise ValidationException(str(standardExcMsg))
else:
raise ValidationException(str(customExcMsg)) | Raise ValidationException with standardExcMsg, unless customExcMsg is specified. | Below is the the instruction that describes the task:
### Input:
Raise ValidationException with standardExcMsg, unless customExcMsg is specified.
### Response:
def _raiseValidationException(standardExcMsg, customExcMsg=None):
"""Raise ValidationException with standardExcMsg, unless customExcMsg is specified."""
if customExcMsg is None:
raise ValidationException(str(standardExcMsg))
else:
raise ValidationException(str(customExcMsg)) |
def setcompress(self, comp_type, value=0, v2=0):
"""Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress
"""
status = _C._SDsetcompress(self._id, comp_type, value, v2)
_checkErr('setcompress', status, 'cannot execute') | Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress | Below is the the instruction that describes the task:
### Input:
Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress
### Response:
def setcompress(self, comp_type, value=0, v2=0):
"""Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress
"""
status = _C._SDsetcompress(self._id, comp_type, value, v2)
_checkErr('setcompress', status, 'cannot execute') |
def write(self, data, offset=0, write_through=False, unbuffered=False,
wait=True, send=True):
"""
Writes data to an opened file.
Supports out of band send function, call this function with send=False
to return a tuple of (SMBWriteRequest, receive_func) instead of
sending the the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to sent it out of band.
:param data: The bytes data to write.
:param offset: The offset in the file to write the bytes at
:param write_through: Whether written data is persisted to the
underlying storage, not valid for SMB 2.0.2.
:param unbuffered: Whether to the server should cache the write data at
intermediate layers, only value for SMB 3.0.2 or newer
:param wait: If send=True, whether to wait for a response if
STATUS_PENDING was received from the server or fail.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: The number of bytes written
"""
data_len = len(data)
if data_len > self.connection.max_write_size:
raise SMBException("The requested write length %d is greater than "
"the maximum negotiated write size %d"
% (data_len, self.connection.max_write_size))
write = SMB2WriteRequest()
write['length'] = len(data)
write['offset'] = offset
write['file_id'] = self.file_id
write['buffer'] = data
if write_through:
if self.connection.dialect < Dialects.SMB_2_1_0:
raise SMBUnsupportedFeature(self.connection.dialect,
Dialects.SMB_2_1_0,
"SMB2_WRITEFLAG_WRITE_THROUGH",
True)
write['flags'].set_flag(WriteFlags.SMB2_WRITEFLAG_WRITE_THROUGH)
if unbuffered:
if self.connection.dialect < Dialects.SMB_3_0_2:
raise SMBUnsupportedFeature(self.connection.dialect,
Dialects.SMB_3_0_2,
"SMB2_WRITEFLAG_WRITE_UNBUFFERED",
True)
write['flags'].set_flag(WriteFlags.SMB2_WRITEFLAG_WRITE_UNBUFFERED)
if not send:
return write, self._write_response
log.info("Session: %s, Tree Connect: %s - sending SMB2 Write Request "
"for file %s" % (self.tree_connect.session.username,
self.tree_connect.share_name,
self.file_name))
log.debug(str(write))
request = self.connection.send(write,
self.tree_connect.session.session_id,
self.tree_connect.tree_connect_id)
return self._write_response(request, wait) | Writes data to an opened file.
Supports out of band send function, call this function with send=False
to return a tuple of (SMBWriteRequest, receive_func) instead of
sending the the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to sent it out of band.
:param data: The bytes data to write.
:param offset: The offset in the file to write the bytes at
:param write_through: Whether written data is persisted to the
underlying storage, not valid for SMB 2.0.2.
:param unbuffered: Whether to the server should cache the write data at
intermediate layers, only value for SMB 3.0.2 or newer
:param wait: If send=True, whether to wait for a response if
STATUS_PENDING was received from the server or fail.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: The number of bytes written | Below is the the instruction that describes the task:
### Input:
Writes data to an opened file.
Supports out of band send function, call this function with send=False
to return a tuple of (SMBWriteRequest, receive_func) instead of
sending the the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to sent it out of band.
:param data: The bytes data to write.
:param offset: The offset in the file to write the bytes at
:param write_through: Whether written data is persisted to the
underlying storage, not valid for SMB 2.0.2.
:param unbuffered: Whether to the server should cache the write data at
intermediate layers, only value for SMB 3.0.2 or newer
:param wait: If send=True, whether to wait for a response if
STATUS_PENDING was received from the server or fail.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: The number of bytes written
### Response:
def write(self, data, offset=0, write_through=False, unbuffered=False,
wait=True, send=True):
"""
Writes data to an opened file.
Supports out of band send function, call this function with send=False
to return a tuple of (SMBWriteRequest, receive_func) instead of
sending the the request and waiting for the response. The receive_func
can be used to get the response from the server by passing in the
Request that was used to sent it out of band.
:param data: The bytes data to write.
:param offset: The offset in the file to write the bytes at
:param write_through: Whether written data is persisted to the
underlying storage, not valid for SMB 2.0.2.
:param unbuffered: Whether to the server should cache the write data at
intermediate layers, only value for SMB 3.0.2 or newer
:param wait: If send=True, whether to wait for a response if
STATUS_PENDING was received from the server or fail.
:param send: Whether to send the request in the same call or return the
message to the caller and the unpack function
:return: The number of bytes written
"""
data_len = len(data)
if data_len > self.connection.max_write_size:
raise SMBException("The requested write length %d is greater than "
"the maximum negotiated write size %d"
% (data_len, self.connection.max_write_size))
write = SMB2WriteRequest()
write['length'] = len(data)
write['offset'] = offset
write['file_id'] = self.file_id
write['buffer'] = data
if write_through:
if self.connection.dialect < Dialects.SMB_2_1_0:
raise SMBUnsupportedFeature(self.connection.dialect,
Dialects.SMB_2_1_0,
"SMB2_WRITEFLAG_WRITE_THROUGH",
True)
write['flags'].set_flag(WriteFlags.SMB2_WRITEFLAG_WRITE_THROUGH)
if unbuffered:
if self.connection.dialect < Dialects.SMB_3_0_2:
raise SMBUnsupportedFeature(self.connection.dialect,
Dialects.SMB_3_0_2,
"SMB2_WRITEFLAG_WRITE_UNBUFFERED",
True)
write['flags'].set_flag(WriteFlags.SMB2_WRITEFLAG_WRITE_UNBUFFERED)
if not send:
return write, self._write_response
log.info("Session: %s, Tree Connect: %s - sending SMB2 Write Request "
"for file %s" % (self.tree_connect.session.username,
self.tree_connect.share_name,
self.file_name))
log.debug(str(write))
request = self.connection.send(write,
self.tree_connect.session.session_id,
self.tree_connect.tree_connect_id)
return self._write_response(request, wait) |
def policy_set_definitions(self):
"""Instance depends on the API version:
* 2017-06-01-preview: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicySetDefinitionsOperations>`
* 2018-03-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicySetDefinitionsOperations>`
* 2018-05-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicySetDefinitionsOperations>`
"""
api_version = self._get_api_version('policy_set_definitions')
if api_version == '2017-06-01-preview':
from .v2017_06_01_preview.operations import PolicySetDefinitionsOperations as OperationClass
elif api_version == '2018-03-01':
from .v2018_03_01.operations import PolicySetDefinitionsOperations as OperationClass
elif api_version == '2018-05-01':
from .v2018_05_01.operations import PolicySetDefinitionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2017-06-01-preview: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicySetDefinitionsOperations>`
* 2018-03-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicySetDefinitionsOperations>`
* 2018-05-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicySetDefinitionsOperations>` | Below is the the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2017-06-01-preview: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicySetDefinitionsOperations>`
* 2018-03-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicySetDefinitionsOperations>`
* 2018-05-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicySetDefinitionsOperations>`
### Response:
def policy_set_definitions(self):
"""Instance depends on the API version:
* 2017-06-01-preview: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicySetDefinitionsOperations>`
* 2018-03-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicySetDefinitionsOperations>`
* 2018-05-01: :class:`PolicySetDefinitionsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicySetDefinitionsOperations>`
"""
api_version = self._get_api_version('policy_set_definitions')
if api_version == '2017-06-01-preview':
from .v2017_06_01_preview.operations import PolicySetDefinitionsOperations as OperationClass
elif api_version == '2018-03-01':
from .v2018_03_01.operations import PolicySetDefinitionsOperations as OperationClass
elif api_version == '2018-05-01':
from .v2018_05_01.operations import PolicySetDefinitionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def run(self, *args, **kwargs) -> Callable:
"""Return wrapped function.
Haskell: runReader :: Reader r a -> r -> a
This is the inverse of unit and returns the wrapped function.
"""
return self.fn(*args, **kwargs) if args or kwargs else self.fn | Return wrapped function.
Haskell: runReader :: Reader r a -> r -> a
This is the inverse of unit and returns the wrapped function. | Below is the the instruction that describes the task:
### Input:
Return wrapped function.
Haskell: runReader :: Reader r a -> r -> a
This is the inverse of unit and returns the wrapped function.
### Response:
def run(self, *args, **kwargs) -> Callable:
"""Return wrapped function.
Haskell: runReader :: Reader r a -> r -> a
This is the inverse of unit and returns the wrapped function.
"""
return self.fn(*args, **kwargs) if args or kwargs else self.fn |
def permissions_for(self, member):
"""Handles permission resolution for the current :class:`Member`.
This function takes into consideration the following cases:
- Guild owner
- Guild roles
- Channel overrides
- Member overrides
Parameters
----------
member: :class:`Member`
The member to resolve permissions for.
Returns
-------
:class:`Permissions`
The resolved permissions for the member.
"""
# The current cases can be explained as:
# Guild owner get all permissions -- no questions asked. Otherwise...
# The @everyone role gets the first application.
# After that, the applied roles that the user has in the channel
# (or otherwise) are then OR'd together.
# After the role permissions are resolved, the member permissions
# have to take into effect.
# After all that is done.. you have to do the following:
# If manage permissions is True, then all permissions are set to True.
# The operation first takes into consideration the denied
# and then the allowed.
o = self.guild.owner
if o is not None and member.id == o.id:
return Permissions.all()
default = self.guild.default_role
base = Permissions(default.permissions.value)
roles = member.roles
# Apply guild roles that the member has.
for role in roles:
base.value |= role.permissions.value
# Guild-wide Administrator -> True for everything
# Bypass all channel-specific overrides
if base.administrator:
return Permissions.all()
# Apply @everyone allow/deny first since it's special
try:
maybe_everyone = self._overwrites[0]
if maybe_everyone.id == self.guild.id:
base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
remaining_overwrites = self._overwrites[1:]
else:
remaining_overwrites = self._overwrites
except IndexError:
remaining_overwrites = self._overwrites
# not sure if doing member._roles.get(...) is better than the
# set approach. While this is O(N) to re-create into a set for O(1)
# the direct approach would just be O(log n) for searching with no
# extra memory overhead. For now, I'll keep the set cast
# Note that the member.roles accessor up top also creates a
# temporary list
member_role_ids = {r.id for r in roles}
denies = 0
allows = 0
# Apply channel specific role permission overwrites
for overwrite in remaining_overwrites:
if overwrite.type == 'role' and overwrite.id in member_role_ids:
denies |= overwrite.deny
allows |= overwrite.allow
base.handle_overwrite(allow=allows, deny=denies)
# Apply member specific permission overwrites
for overwrite in remaining_overwrites:
if overwrite.type == 'member' and overwrite.id == member.id:
base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny)
break
# if you can't send a message in a channel then you can't have certain
# permissions as well
if not base.send_messages:
base.send_tts_messages = False
base.mention_everyone = False
base.embed_links = False
base.attach_files = False
# if you can't read a channel then you have no permissions there
if not base.read_messages:
denied = Permissions.all_channel()
base.value &= ~denied.value
return base | Handles permission resolution for the current :class:`Member`.
This function takes into consideration the following cases:
- Guild owner
- Guild roles
- Channel overrides
- Member overrides
Parameters
----------
member: :class:`Member`
The member to resolve permissions for.
Returns
-------
:class:`Permissions`
The resolved permissions for the member. | Below is the the instruction that describes the task:
### Input:
Handles permission resolution for the current :class:`Member`.
This function takes into consideration the following cases:
- Guild owner
- Guild roles
- Channel overrides
- Member overrides
Parameters
----------
member: :class:`Member`
The member to resolve permissions for.
Returns
-------
:class:`Permissions`
The resolved permissions for the member.
### Response:
def permissions_for(self, member):
"""Handles permission resolution for the current :class:`Member`.
This function takes into consideration the following cases:
- Guild owner
- Guild roles
- Channel overrides
- Member overrides
Parameters
----------
member: :class:`Member`
The member to resolve permissions for.
Returns
-------
:class:`Permissions`
The resolved permissions for the member.
"""
# The current cases can be explained as:
# Guild owner get all permissions -- no questions asked. Otherwise...
# The @everyone role gets the first application.
# After that, the applied roles that the user has in the channel
# (or otherwise) are then OR'd together.
# After the role permissions are resolved, the member permissions
# have to take into effect.
# After all that is done.. you have to do the following:
# If manage permissions is True, then all permissions are set to True.
# The operation first takes into consideration the denied
# and then the allowed.
o = self.guild.owner
if o is not None and member.id == o.id:
return Permissions.all()
default = self.guild.default_role
base = Permissions(default.permissions.value)
roles = member.roles
# Apply guild roles that the member has.
for role in roles:
base.value |= role.permissions.value
# Guild-wide Administrator -> True for everything
# Bypass all channel-specific overrides
if base.administrator:
return Permissions.all()
# Apply @everyone allow/deny first since it's special
try:
maybe_everyone = self._overwrites[0]
if maybe_everyone.id == self.guild.id:
base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
remaining_overwrites = self._overwrites[1:]
else:
remaining_overwrites = self._overwrites
except IndexError:
remaining_overwrites = self._overwrites
# not sure if doing member._roles.get(...) is better than the
# set approach. While this is O(N) to re-create into a set for O(1)
# the direct approach would just be O(log n) for searching with no
# extra memory overhead. For now, I'll keep the set cast
# Note that the member.roles accessor up top also creates a
# temporary list
member_role_ids = {r.id for r in roles}
denies = 0
allows = 0
# Apply channel specific role permission overwrites
for overwrite in remaining_overwrites:
if overwrite.type == 'role' and overwrite.id in member_role_ids:
denies |= overwrite.deny
allows |= overwrite.allow
base.handle_overwrite(allow=allows, deny=denies)
# Apply member specific permission overwrites
for overwrite in remaining_overwrites:
if overwrite.type == 'member' and overwrite.id == member.id:
base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny)
break
# if you can't send a message in a channel then you can't have certain
# permissions as well
if not base.send_messages:
base.send_tts_messages = False
base.mention_everyone = False
base.embed_links = False
base.attach_files = False
# if you can't read a channel then you have no permissions there
if not base.read_messages:
denied = Permissions.all_channel()
base.value &= ~denied.value
return base |
def leave_transaction_management(using=None):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.leave_transaction_management()
return
connection = tldap.backend.connections[using]
connection.leave_transaction_management() | Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.) | Below is the the instruction that describes the task:
### Input:
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
### Response:
def leave_transaction_management(using=None):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.leave_transaction_management()
return
connection = tldap.backend.connections[using]
connection.leave_transaction_management() |
def serialize_training_step(features, model_fn, batch_dim, num_splits):
"""Break the training batch into multiple microbatches.
Returns two structures:
grads - a list of Tensors corresponding to the gradients on
graph.trainable_variables. These are summed across all microbatches
outputs - a dictionary of Tensors corresponding to the output dictionary of
model_fn. Each value is either summed across all microbatches (if it
has no batch-dimension), or concatenated across all microbatches to
represent the original batch (if it does have a batch-dimension).
Args:
features: a dictionary of Tensors, each with a batch_dim dimension
model_fn: a function from feature dictionary to output dictionary
output_dictionary must contain "loss"
batch_dim: a Dimension
num_splits: an integer dividing batch_dim.size
Returns:
grads: a list of Tensors corresponding to the gradients on
graph.trainable_variables
outputs: dictionary of output Tensors summed across microbatches
"""
for v in features.values():
mesh = v.mesh
graph = v.graph
microbatch_dim = Dimension("microbatch", num_splits)
smaller_batch_dim = Dimension(batch_dim.name, batch_dim.size // num_splits)
cache = {}
def select(t, microbatch_num):
return gather(
replace_dimensions(t, batch_dim, [smaller_batch_dim, microbatch_dim]),
microbatch_num, microbatch_dim)
def cond_fn(microbatch_num):
return less(microbatch_num, num_splits)
def body_fn(microbatch_num):
"""Body function for mtf.while_loop.
Args:
microbatch_num: a mtf Scalar
Returns:
a list of mtf Tensors
"""
my_features = {}
for k, v in six.iteritems(features):
my_features[k] = select(v, microbatch_num)
outputs = model_fn(my_features)
grads = gradients(
[outputs["loss"]], [v.outputs[0] for v in graph.trainable_variables])
output_keys = outputs.keys()
cache["output_keys"] = output_keys
ret = []
ret.append(microbatch_num + 1)
# The rest of the returned values are "accumulators" that get summed
# across all microbatches.
for t in outputs.values():
if smaller_batch_dim in t.shape:
# The output contains a batch dimension, so we want to concatenate
# across microbatches.
# Here we pad the tensor for each microbatch - summing will complete
# the concatenation.
t = einsum(
[t, one_hot(microbatch_num, microbatch_dim, dtype=t.dtype)],
output_shape=replace_dimensions(
t.shape, smaller_batch_dim,
[smaller_batch_dim, microbatch_dim]))
t = replace_dimensions(
t, [smaller_batch_dim, microbatch_dim], batch_dim)
ret.append(t)
else:
# There is no batch dimension. Sum across all microbatches.
ret.append(t)
# we also want to sum the gradients.
ret.extend(grads)
return ret
while_out = while_loop(
cond_fn, body_fn, [constant(mesh, 0, dtype=tf.int32)],
has_accumulators=True)
num_outputs = len(cache["output_keys"])
combined_outputs = {}
for k, v in zip(cache["output_keys"], while_out[1:1 + num_outputs]):
combined_outputs[k] = v
combined_grads = while_out[1 + num_outputs:]
return combined_grads, combined_outputs | Break the training batch into multiple microbatches.
Returns two structures:
grads - a list of Tensors corresponding to the gradients on
graph.trainable_variables. These are summed across all microbatches
outputs - a dictionary of Tensors corresponding to the output dictionary of
model_fn. Each value is either summed across all microbatches (if it
has no batch-dimension), or concatenated across all microbatches to
represent the original batch (if it does have a batch-dimension).
Args:
features: a dictionary of Tensors, each with a batch_dim dimension
model_fn: a function from feature dictionary to output dictionary
output_dictionary must contain "loss"
batch_dim: a Dimension
num_splits: an integer dividing batch_dim.size
Returns:
grads: a list of Tensors corresponding to the gradients on
graph.trainable_variables
  outputs: dictionary of output Tensors summed across microbatches | Below is the instruction that describes the task:
### Input:
Break the training batch into multiple microbatches.
Returns two structures:
grads - a list of Tensors corresponding to the gradients on
graph.trainable_variables. These are summed across all microbatches
outputs - a dictionary of Tensors corresponding to the output dictionary of
model_fn. Each value is either summed across all microbatches (if it
has no batch-dimension), or concatenated across all microbatches to
represent the original batch (if it does have a batch-dimension).
Args:
features: a dictionary of Tensors, each with a batch_dim dimension
model_fn: a function from feature dictionary to output dictionary
output_dictionary must contain "loss"
batch_dim: a Dimension
num_splits: an integer dividing batch_dim.size
Returns:
grads: a list of Tensors corresponding to the gradients on
graph.trainable_variables
outputs: dictionary of output Tensors summed across microbatches
### Response:
def serialize_training_step(features, model_fn, batch_dim, num_splits):
"""Break the training batch into multiple microbatches.
Returns two structures:
grads - a list of Tensors corresponding to the gradients on
graph.trainable_variables. These are summed across all microbatches
outputs - a dictionary of Tensors corresponding to the output dictionary of
model_fn. Each value is either summed across all microbatches (if it
has no batch-dimension), or concatenated across all microbatches to
represent the original batch (if it does have a batch-dimension).
Args:
features: a dictionary of Tensors, each with a batch_dim dimension
model_fn: a function from feature dictionary to output dictionary
output_dictionary must contain "loss"
batch_dim: a Dimension
num_splits: an integer dividing batch_dim.size
Returns:
grads: a list of Tensors corresponding to the gradients on
graph.trainable_variables
outputs: dictionary of output Tensors summed across microbatches
"""
for v in features.values():
mesh = v.mesh
graph = v.graph
microbatch_dim = Dimension("microbatch", num_splits)
smaller_batch_dim = Dimension(batch_dim.name, batch_dim.size // num_splits)
cache = {}
def select(t, microbatch_num):
return gather(
replace_dimensions(t, batch_dim, [smaller_batch_dim, microbatch_dim]),
microbatch_num, microbatch_dim)
def cond_fn(microbatch_num):
return less(microbatch_num, num_splits)
def body_fn(microbatch_num):
"""Body function for mtf.while_loop.
Args:
microbatch_num: a mtf Scalar
Returns:
a list of mtf Tensors
"""
my_features = {}
for k, v in six.iteritems(features):
my_features[k] = select(v, microbatch_num)
outputs = model_fn(my_features)
grads = gradients(
[outputs["loss"]], [v.outputs[0] for v in graph.trainable_variables])
output_keys = outputs.keys()
cache["output_keys"] = output_keys
ret = []
ret.append(microbatch_num + 1)
# The rest of the returned values are "accumulators" that get summed
# across all microbatches.
for t in outputs.values():
if smaller_batch_dim in t.shape:
# The output contains a batch dimension, so we want to concatenate
# across microbatches.
# Here we pad the tensor for each microbatch - summing will complete
# the concatenation.
t = einsum(
[t, one_hot(microbatch_num, microbatch_dim, dtype=t.dtype)],
output_shape=replace_dimensions(
t.shape, smaller_batch_dim,
[smaller_batch_dim, microbatch_dim]))
t = replace_dimensions(
t, [smaller_batch_dim, microbatch_dim], batch_dim)
ret.append(t)
else:
# There is no batch dimension. Sum across all microbatches.
ret.append(t)
# we also want to sum the gradients.
ret.extend(grads)
return ret
while_out = while_loop(
cond_fn, body_fn, [constant(mesh, 0, dtype=tf.int32)],
has_accumulators=True)
num_outputs = len(cache["output_keys"])
combined_outputs = {}
for k, v in zip(cache["output_keys"], while_out[1:1 + num_outputs]):
combined_outputs[k] = v
combined_grads = while_out[1 + num_outputs:]
return combined_grads, combined_outputs |
def update_cache(album_cache, image_cache, app, client_id, ttl, album_whitelist, image_whitelist):
"""Update cache items with expired TTLs.
:param dict album_cache: Cache of Imgur albums to update. Keys are Imgur IDs, values are Album instances.
:param dict image_cache: Cache of Imgur images to update. Keys are Imgur IDs, values are Image instances.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str client_id: Imgur API client ID to use. https://api.imgur.com/oauth2
:param int ttl: Number of seconds before this is considered out of date.
:param iter album_whitelist: Only update these Imgur album IDs.
:param iter image_whitelist: Only update these Imgur image IDs.
"""
if not album_whitelist and not image_whitelist:
album_whitelist = list(album_cache)
image_whitelist = list(image_cache)
needs_update_album = {k: v for k, v in album_cache.items() if k in album_whitelist and not v.seconds_remaining(ttl)}
needs_update_image = {k: v for k, v in image_cache.items() if k in image_whitelist and not v.seconds_remaining(ttl)}
if not needs_update_album and not needs_update_image:
return
# If an image in an album needs to be updated, update entire album (includes all images in that album).
albums_up_to_date = [v for k, v in album_cache.items() if k not in needs_update_album]
for image_id in needs_update_image:
for album in albums_up_to_date:
if image_id in album:
needs_update_album[album.imgur_id] = album
# Update all albums.
for album in needs_update_album.values():
try:
images = album.refresh(app, client_id, 0)
except APIError:
continue
image_cache.update((i.imgur_id, i) for i in images) # New Image instances.
# Possible new Image instances, redefining needs_update. Only caring about images now.
needs_update_image = {k: v for k, v in image_cache.items() if k in image_whitelist and not v.seconds_remaining(ttl)}
# Update all images.
for image in needs_update_image.values():
try:
image.refresh(app, client_id, ttl)
except APIError:
continue | Update cache items with expired TTLs.
:param dict album_cache: Cache of Imgur albums to update. Keys are Imgur IDs, values are Album instances.
:param dict image_cache: Cache of Imgur images to update. Keys are Imgur IDs, values are Image instances.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str client_id: Imgur API client ID to use. https://api.imgur.com/oauth2
:param int ttl: Number of seconds before this is considered out of date.
:param iter album_whitelist: Only update these Imgur album IDs.
:param iter image_whitelist: Only update these Imgur image IDs. | Below is the instruction that describes the task:
### Input:
Update cache items with expired TTLs.
:param dict album_cache: Cache of Imgur albums to update. Keys are Imgur IDs, values are Album instances.
:param dict image_cache: Cache of Imgur images to update. Keys are Imgur IDs, values are Image instances.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str client_id: Imgur API client ID to use. https://api.imgur.com/oauth2
:param int ttl: Number of seconds before this is considered out of date.
:param iter album_whitelist: Only update these Imgur album IDs.
:param iter image_whitelist: Only update these Imgur image IDs.
### Response:
def update_cache(album_cache, image_cache, app, client_id, ttl, album_whitelist, image_whitelist):
"""Update cache items with expired TTLs.
:param dict album_cache: Cache of Imgur albums to update. Keys are Imgur IDs, values are Album instances.
:param dict image_cache: Cache of Imgur images to update. Keys are Imgur IDs, values are Image instances.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str client_id: Imgur API client ID to use. https://api.imgur.com/oauth2
:param int ttl: Number of seconds before this is considered out of date.
:param iter album_whitelist: Only update these Imgur album IDs.
:param iter image_whitelist: Only update these Imgur image IDs.
"""
if not album_whitelist and not image_whitelist:
album_whitelist = list(album_cache)
image_whitelist = list(image_cache)
needs_update_album = {k: v for k, v in album_cache.items() if k in album_whitelist and not v.seconds_remaining(ttl)}
needs_update_image = {k: v for k, v in image_cache.items() if k in image_whitelist and not v.seconds_remaining(ttl)}
if not needs_update_album and not needs_update_image:
return
# If an image in an album needs to be updated, update entire album (includes all images in that album).
albums_up_to_date = [v for k, v in album_cache.items() if k not in needs_update_album]
for image_id in needs_update_image:
for album in albums_up_to_date:
if image_id in album:
needs_update_album[album.imgur_id] = album
# Update all albums.
for album in needs_update_album.values():
try:
images = album.refresh(app, client_id, 0)
except APIError:
continue
image_cache.update((i.imgur_id, i) for i in images) # New Image instances.
# Possible new Image instances, redefining needs_update. Only caring about images now.
needs_update_image = {k: v for k, v in image_cache.items() if k in image_whitelist and not v.seconds_remaining(ttl)}
# Update all images.
for image in needs_update_image.values():
try:
image.refresh(app, client_id, ttl)
except APIError:
continue |
def update_status(table_name="swdata", date_column="date"):
"""
Set the status endpoint on ScraperWiki to the latest entry e.g.
'Latest entry: 2013-10-01'
"""
status_text = 'Latest entry: {}'.format(
_get_most_recent_record(table_name, date_column))
L.info(status_text)
scraperwiki.status('ok', status_text) | Set the status endpoint on ScraperWiki to the latest entry e.g.
'Latest entry: 2013-10-01' | Below is the instruction that describes the task:
### Input:
Set the status endpoint on ScraperWiki to the latest entry e.g.
'Latest entry: 2013-10-01'
### Response:
def update_status(table_name="swdata", date_column="date"):
"""
Set the status endpoint on ScraperWiki to the latest entry e.g.
'Latest entry: 2013-10-01'
"""
status_text = 'Latest entry: {}'.format(
_get_most_recent_record(table_name, date_column))
L.info(status_text)
scraperwiki.status('ok', status_text) |
def get_input_list_from_task(task, placeholder_dict):
"""
    Purpose: Parse a Task object to extract the files to be staged as the input.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
    :return: list of RP directives for the files that need to be staged as input
"""
try:
if not isinstance(task, Task):
raise TypeError(expected_type=Task, actual_type=type(task))
input_data = []
if task.link_input_data:
for path in task.link_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.LINK
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.LINK
}
input_data.append(temp)
if task.upload_input_data:
for path in task.upload_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip()
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip())
}
input_data.append(temp)
if task.copy_input_data:
for path in task.copy_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.COPY
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.COPY
}
input_data.append(temp)
if task.move_input_data:
for path in task.move_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.MOVE
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.MOVE
}
input_data.append(temp)
return input_data
except Exception, ex:
logger.exception('Failed to get input list of files from task, error: %s' % ex)
        raise | Purpose: Parse a Task object to extract the files to be staged as the input.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged as input | Below is the instruction that describes the task:
### Input:
Purpose: Parse a Task object to extract the files to be staged as the input.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged as input
### Response:
def get_input_list_from_task(task, placeholder_dict):
"""
    Purpose: Parse a Task object to extract the files to be staged as the input.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
    :return: list of RP directives for the files that need to be staged as input
"""
try:
if not isinstance(task, Task):
raise TypeError(expected_type=Task, actual_type=type(task))
input_data = []
if task.link_input_data:
for path in task.link_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.LINK
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.LINK
}
input_data.append(temp)
if task.upload_input_data:
for path in task.upload_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip()
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip())
}
input_data.append(temp)
if task.copy_input_data:
for path in task.copy_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.COPY
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.COPY
}
input_data.append(temp)
if task.move_input_data:
for path in task.move_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.MOVE
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.MOVE
}
input_data.append(temp)
return input_data
except Exception, ex:
logger.exception('Failed to get input list of files from task, error: %s' % ex)
raise |
def dt_dt(sdat, tstart=None, tend=None):
"""Derivative of temperature.
Compute dT/dt as a function of time using an explicit Euler scheme.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: derivative of temperature and time
arrays.
"""
tseries = sdat.tseries_between(tstart, tend)
time = tseries['t'].values
temp = tseries['Tmean'].values
dtdt = (temp[1:] - temp[:-1]) / (time[1:] - time[:-1])
return dtdt, time[:-1] | Derivative of temperature.
Compute dT/dt as a function of time using an explicit Euler scheme.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: derivative of temperature and time
    arrays. | Below is the instruction that describes the task:
### Input:
Derivative of temperature.
Compute dT/dt as a function of time using an explicit Euler scheme.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: derivative of temperature and time
arrays.
### Response:
def dt_dt(sdat, tstart=None, tend=None):
"""Derivative of temperature.
Compute dT/dt as a function of time using an explicit Euler scheme.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: derivative of temperature and time
arrays.
"""
tseries = sdat.tseries_between(tstart, tend)
time = tseries['t'].values
temp = tseries['Tmean'].values
dtdt = (temp[1:] - temp[:-1]) / (time[1:] - time[:-1])
return dtdt, time[:-1] |
def punchcard(self, branch='master', limit=None, days=None, by=None, normalize=None, ignore_globs=None, include_globs=None):
"""
Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author', 'repository'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
    :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
df = pd.DataFrame()
if by == 'repository':
repo_by = None
else:
repo_by = by
for repo in self.repos:
try:
chunk = repo.punchcard(
branch=branch,
limit=limit,
days=days,
by=repo_by,
normalize=None,
ignore_globs=ignore_globs,
include_globs=include_globs
)
chunk['repository'] = repo.repo_name
df = df.append(chunk)
except GitCommandError:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
df.reset_index()
aggs = ['hour_of_day', 'day_of_week']
if by is not None:
aggs.append(by)
punch_card = df.groupby(aggs).agg({
'lines': np.sum,
'insertions': np.sum,
'deletions': np.sum,
'net': np.sum
})
punch_card.reset_index(inplace=True)
# normalize all cols
if normalize is not None:
for col in ['lines', 'insertions', 'deletions', 'net']:
punch_card[col] = (punch_card[col] / punch_card[col].sum()) * normalize
return punch_card | Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author', 'repository'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame | Below is the instruction that describes the task:
### Input:
Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author', 'repository'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
### Response:
def punchcard(self, branch='master', limit=None, days=None, by=None, normalize=None, ignore_globs=None, include_globs=None):
"""
Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author', 'repository'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
    :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
df = pd.DataFrame()
if by == 'repository':
repo_by = None
else:
repo_by = by
for repo in self.repos:
try:
chunk = repo.punchcard(
branch=branch,
limit=limit,
days=days,
by=repo_by,
normalize=None,
ignore_globs=ignore_globs,
include_globs=include_globs
)
chunk['repository'] = repo.repo_name
df = df.append(chunk)
except GitCommandError:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
df.reset_index()
aggs = ['hour_of_day', 'day_of_week']
if by is not None:
aggs.append(by)
punch_card = df.groupby(aggs).agg({
'lines': np.sum,
'insertions': np.sum,
'deletions': np.sum,
'net': np.sum
})
punch_card.reset_index(inplace=True)
# normalize all cols
if normalize is not None:
for col in ['lines', 'insertions', 'deletions', 'net']:
punch_card[col] = (punch_card[col] / punch_card[col].sum()) * normalize
return punch_card |
def get_agents(self, addr=True, agent_cls=None, as_coro=False):
"""Get agents from the managed environment.
This is a managing function for the
:py:meth:`~creamas.environment.Environment.get_agents`. Returned
agent list excludes the environment's manager agent (this agent) by
design.
"""
return self.env.get_agents(addr=addr, agent_cls=agent_cls) | Get agents from the managed environment.
This is a managing function for the
:py:meth:`~creamas.environment.Environment.get_agents`. Returned
agent list excludes the environment's manager agent (this agent) by
design. | Below is the instruction that describes the task:
### Input:
Get agents from the managed environment.
This is a managing function for the
:py:meth:`~creamas.environment.Environment.get_agents`. Returned
agent list excludes the environment's manager agent (this agent) by
design.
### Response:
def get_agents(self, addr=True, agent_cls=None, as_coro=False):
"""Get agents from the managed environment.
This is a managing function for the
:py:meth:`~creamas.environment.Environment.get_agents`. Returned
agent list excludes the environment's manager agent (this agent) by
design.
"""
return self.env.get_agents(addr=addr, agent_cls=agent_cls) |
def LikelihoodFunction(Template, Data, PSD, detRespP, detGCDelay=0):
""" LikelihoodFunction - function to calculate the likelihood of livePoint,
given Data.
Template - (N_fd) complex array containing Fourier domain trial signal.
Data - (N_fd) complex array containing Fourier domain GW data.
PSD - Noise power spectral density for a gravitational wave detector.
detRespP - Antenna response to the plus GW polarisation for the
detector.
detGCDelay - Time delay of detector from geocenter (default = 0, use
detGCDelay only if computing logL for more than one
detector.
Returns logL of Template.
Sarah Gossan 2012. Last updated 02/18/14. """
# Correct template for geocenter delay and antenna response function
if detGCDelay:
phaseGCDelay = -2.*np.pi*np.linspace(0,N_fd-1,num=N_fd)*dF*detGCDelay*1j
Template *= phaseGCDelay
Template *= detRespP
# Calculate logL - simple Gaussian
logL = -2.*dF*np.sum(pow(abs(Data[lowBin:] - Template[lowBin:]),2.)/\
PSD[lowBin:])
return logL | LikelihoodFunction - function to calculate the likelihood of livePoint,
given Data.
Template - (N_fd) complex array containing Fourier domain trial signal.
Data - (N_fd) complex array containing Fourier domain GW data.
PSD - Noise power spectral density for a gravitational wave detector.
detRespP - Antenna response to the plus GW polarisation for the
detector.
detGCDelay - Time delay of detector from geocenter (default = 0, use
detGCDelay only if computing logL for more than one
detector.
Returns logL of Template.
Sarah Gossan 2012. Last updated 02/18/14. | Below is the the instruction that describes the task:
### Input:
LikelihoodFunction - function to calculate the likelihood of livePoint,
given Data.
Template - (N_fd) complex array containing Fourier domain trial signal.
Data - (N_fd) complex array containing Fourier domain GW data.
PSD - Noise power spectral density for a gravitational wave detector.
detRespP - Antenna response to the plus GW polarisation for the
detector.
detGCDelay - Time delay of detector from geocenter (default = 0, use
detGCDelay only if computing logL for more than one
detector.
Returns logL of Template.
Sarah Gossan 2012. Last updated 02/18/14. | Below is the instruction that describes the task:
### Response:
def LikelihoodFunction(Template, Data, PSD, detRespP, detGCDelay=0):
""" LikelihoodFunction - function to calculate the likelihood of livePoint,
given Data.
Template - (N_fd) complex array containing Fourier domain trial signal.
Data - (N_fd) complex array containing Fourier domain GW data.
PSD - Noise power spectral density for a gravitational wave detector.
detRespP - Antenna response to the plus GW polarisation for the
detector.
detGCDelay - Time delay of detector from geocenter (default = 0, use
detGCDelay only if computing logL for more than one
detector.
Returns logL of Template.
Sarah Gossan 2012. Last updated 02/18/14. """
# Correct template for geocenter delay and antenna response function
if detGCDelay:
phaseGCDelay = -2.*np.pi*np.linspace(0,N_fd-1,num=N_fd)*dF*detGCDelay*1j
Template *= phaseGCDelay
Template *= detRespP
# Calculate logL - simple Gaussian
logL = -2.*dF*np.sum(pow(abs(Data[lowBin:] - Template[lowBin:]),2.)/\
PSD[lowBin:])
return logL |
def query_context(self, regions, file_factory=PythonFile):
"""
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
"""
result = set()
for region in regions:
try:
pf = file_factory(region.filename)
except InvalidPythonFile:
continue
# region and/or coverage report may use paths
# relative to this directory. Ensure we find a match
# if they use different conventions.
paths = {
os.path.abspath(region.filename),
os.path.relpath(region.filename)
}
for test_context, hits in six.iteritems(self.data):
if test_context in result:
continue
for path in paths:
if region.intersects(pf, hits.get(path, [])):
result.add(test_context)
return QueryResult(result) | Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult | Below is the the instruction that describes the task:
### Input:
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
### Response:
def query_context(self, regions, file_factory=PythonFile):
"""
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
"""
result = set()
for region in regions:
try:
pf = file_factory(region.filename)
except InvalidPythonFile:
continue
# region and/or coverage report may use paths
# relative to this directory. Ensure we find a match
# if they use different conventions.
paths = {
os.path.abspath(region.filename),
os.path.relpath(region.filename)
}
for test_context, hits in six.iteritems(self.data):
if test_context in result:
continue
for path in paths:
if region.intersects(pf, hits.get(path, [])):
result.add(test_context)
return QueryResult(result) |
def track_execution(cmd, project, experiment, **kwargs):
"""Guard the execution of the given command.
The given command (`cmd`) will be executed inside a database context.
As soon as you leave the context we will commit the transaction.
Any necessary modifications to the database can be identified inside
the context with the RunInfo object.
Args:
cmd: The command we guard.
project: The project we track for.
experiment: The experiment we track for.
Yields:
RunInfo: A context object that carries the necessary
database transaction.
"""
runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs)
yield runner
runner.commit() | Guard the execution of the given command.
The given command (`cmd`) will be executed inside a database context.
As soon as you leave the context we will commit the transaction.
Any necessary modifications to the database can be identified inside
the context with the RunInfo object.
Args:
cmd: The command we guard.
project: The project we track for.
experiment: The experiment we track for.
Yields:
RunInfo: A context object that carries the necessary
database transaction. | Below is the the instruction that describes the task:
### Input:
Guard the execution of the given command.
The given command (`cmd`) will be executed inside a database context.
As soon as you leave the context we will commit the transaction.
Any necessary modifications to the database can be identified inside
the context with the RunInfo object.
Args:
cmd: The command we guard.
project: The project we track for.
experiment: The experiment we track for.
Yields:
RunInfo: A context object that carries the necessary
database transaction.
### Response:
def track_execution(cmd, project, experiment, **kwargs):
"""Guard the execution of the given command.
The given command (`cmd`) will be executed inside a database context.
As soon as you leave the context we will commit the transaction.
Any necessary modifications to the database can be identified inside
the context with the RunInfo object.
Args:
cmd: The command we guard.
project: The project we track for.
experiment: The experiment we track for.
Yields:
RunInfo: A context object that carries the necessary
database transaction.
"""
runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs)
yield runner
runner.commit() |
def format_python2_stmts(python_stmts, show_tokens=False, showast=False,
showgrammar=False, compile_mode='exec'):
"""
formats python2 statements
"""
parser_debug = {'rules': False, 'transition': False,
'reduce': showgrammar,
'errorstack': True, 'context': True, 'dups': True }
parsed = parse_python2(python_stmts, show_tokens=show_tokens,
parser_debug=parser_debug)
assert parsed == 'file_input', 'Should have parsed grammar start'
formatter = Python2Formatter()
if showast:
print(parsed)
# What we've been waiting for: Generate source from AST!
python2_formatted_str = formatter.traverse(parsed)
return python2_formatted_str | formats python2 statements | Below is the the instruction that describes the task:
### Input:
formats python2 statements
### Response:
def format_python2_stmts(python_stmts, show_tokens=False, showast=False,
showgrammar=False, compile_mode='exec'):
"""
formats python2 statements
"""
parser_debug = {'rules': False, 'transition': False,
'reduce': showgrammar,
'errorstack': True, 'context': True, 'dups': True }
parsed = parse_python2(python_stmts, show_tokens=show_tokens,
parser_debug=parser_debug)
assert parsed == 'file_input', 'Should have parsed grammar start'
formatter = Python2Formatter()
if showast:
print(parsed)
# What we've been waiting for: Generate source from AST!
python2_formatted_str = formatter.traverse(parsed)
return python2_formatted_str |
def get_job_details(job_id,
deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return job details for a specific job id
"""
jobs = get_jobs(deployment_name,
token_manager=token_manager,
app_url=app_url)
for job in jobs:
if job['id'] == job_id:
return job
raise JutException('Unable to find job with id "%s"' % job_id) | return job details for a specific job id | Below is the the instruction that describes the task:
### Input:
return job details for a specific job id
### Response:
def get_job_details(job_id,
deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return job details for a specific job id
"""
jobs = get_jobs(deployment_name,
token_manager=token_manager,
app_url=app_url)
for job in jobs:
if job['id'] == job_id:
return job
raise JutException('Unable to find job with id "%s"' % job_id) |
def mtabstr2doestr(st1):
"""mtabstr2doestr"""
seperator = '$ =============='
alist = st1.split(seperator)
#this removes all the tabs that excel
#puts after the seperator and before the next line
for num in range(0, len(alist)):
alist[num] = alist[num].lstrip()
st2 = ''
for num in range(0, len(alist)):
alist = tabstr2list(alist[num])
st2 = st2 + list2doe(alist)
lss = st2.split('..')
mylib1.write_str2file('forfinal.txt', st2)#for debugging
print(len(lss))
st3 = tree2doe(st2)
lsss = st3.split('..')
print(len(lsss))
return st3 | mtabstr2doestr | Below is the the instruction that describes the task:
### Input:
mtabstr2doestr
### Response:
def mtabstr2doestr(st1):
"""mtabstr2doestr"""
seperator = '$ =============='
alist = st1.split(seperator)
#this removes all the tabs that excel
#puts after the seperator and before the next line
for num in range(0, len(alist)):
alist[num] = alist[num].lstrip()
st2 = ''
for num in range(0, len(alist)):
alist = tabstr2list(alist[num])
st2 = st2 + list2doe(alist)
lss = st2.split('..')
mylib1.write_str2file('forfinal.txt', st2)#for debugging
print(len(lss))
st3 = tree2doe(st2)
lsss = st3.split('..')
print(len(lsss))
return st3 |
def poke_publication_state(publication_id, cursor):
"""Invoked to poke at the publication to update and acquire its current
state. This is used to persist the publication to archive.
"""
cursor.execute("""\
SELECT "state", "state_messages", "is_pre_publication", "publisher"
FROM publications
WHERE id = %s""", (publication_id,))
row = cursor.fetchone()
current_state, messages, is_pre_publication, publisher = row
if current_state in END_N_INTERIM_STATES:
# Bailout early, because the publication is either in progress
# or has been completed.
return current_state, messages
# Check for acceptance...
cursor.execute("""\
SELECT
pd.id, license_accepted, roles_accepted
FROM publications AS p JOIN pending_documents AS pd ON p.id = pd.publication_id
WHERE p.id = %s
""", (publication_id,))
pending_document_states = cursor.fetchall()
publication_state_mapping = {}
for document_state in pending_document_states:
id, is_license_accepted, are_roles_accepted = document_state
publication_state_mapping[id] = [is_license_accepted,
are_roles_accepted]
has_changed_state = False
if is_license_accepted and are_roles_accepted:
continue
if not is_license_accepted:
accepted = _check_pending_document_license_state(
cursor, id)
if accepted != is_license_accepted:
has_changed_state = True
is_license_accepted = accepted
publication_state_mapping[id][0] = accepted
if not are_roles_accepted:
accepted = _check_pending_document_role_state(
cursor, id)
if accepted != are_roles_accepted:
has_changed_state = True
are_roles_accepted = accepted
publication_state_mapping[id][1] = accepted
if has_changed_state:
_update_pending_document_state(cursor, id,
is_license_accepted,
are_roles_accepted)
# Are all the documents ready for publication?
state_lump = set([l and r for l, r in publication_state_mapping.values()])
is_publish_ready = not (False in state_lump) and not (None in state_lump)
change_state = "Done/Success"
if not is_publish_ready:
change_state = "Waiting for acceptance"
# Does this publication need moderation? (ignore on pre-publication)
# TODO Is this a revision publication? If so, it doesn't matter who the
# user is, because they have been vetted by the previous publisher.
# This has loopholes...
if not is_pre_publication and is_publish_ready:
# Has this publisher been moderated before?
cursor.execute("""\
SELECT is_moderated
FROM users AS u LEFT JOIN publications AS p ON (u.username = p.publisher)
WHERE p.id = %s""",
(publication_id,))
try:
is_publisher_moderated = cursor.fetchone()[0]
except TypeError:
is_publisher_moderated = False
# Are any of these documents a revision? Thus vetting of
# the publisher was done by a vetted peer.
if not is_publisher_moderated \
and not is_revision_publication(publication_id, cursor):
# Hold up! This publish needs moderation.
change_state = "Waiting for moderation"
is_publish_ready = False
# Publish the pending documents.
if is_publish_ready:
change_state = "Done/Success"
if not is_pre_publication:
publication_state = publish_pending(cursor, publication_id)
else:
cursor.execute("""\
UPDATE publications
SET state = %s
WHERE id = %s
RETURNING state, state_messages""", (change_state, publication_id,))
publication_state, messages = cursor.fetchone()
else:
# `change_state` set prior to this...
cursor.execute("""\
UPDATE publications
SET state = %s
WHERE id = %s
RETURNING state, state_messages""", (change_state, publication_id,))
publication_state, messages = cursor.fetchone()
return publication_state, messages | Invoked to poke at the publication to update and acquire its current
state. This is used to persist the publication to archive. | Below is the the instruction that describes the task:
### Input:
Invoked to poke at the publication to update and acquire its current
state. This is used to persist the publication to archive.
### Response:
def poke_publication_state(publication_id, cursor):
"""Invoked to poke at the publication to update and acquire its current
state. This is used to persist the publication to archive.
"""
cursor.execute("""\
SELECT "state", "state_messages", "is_pre_publication", "publisher"
FROM publications
WHERE id = %s""", (publication_id,))
row = cursor.fetchone()
current_state, messages, is_pre_publication, publisher = row
if current_state in END_N_INTERIM_STATES:
# Bailout early, because the publication is either in progress
# or has been completed.
return current_state, messages
# Check for acceptance...
cursor.execute("""\
SELECT
pd.id, license_accepted, roles_accepted
FROM publications AS p JOIN pending_documents AS pd ON p.id = pd.publication_id
WHERE p.id = %s
""", (publication_id,))
pending_document_states = cursor.fetchall()
publication_state_mapping = {}
for document_state in pending_document_states:
id, is_license_accepted, are_roles_accepted = document_state
publication_state_mapping[id] = [is_license_accepted,
are_roles_accepted]
has_changed_state = False
if is_license_accepted and are_roles_accepted:
continue
if not is_license_accepted:
accepted = _check_pending_document_license_state(
cursor, id)
if accepted != is_license_accepted:
has_changed_state = True
is_license_accepted = accepted
publication_state_mapping[id][0] = accepted
if not are_roles_accepted:
accepted = _check_pending_document_role_state(
cursor, id)
if accepted != are_roles_accepted:
has_changed_state = True
are_roles_accepted = accepted
publication_state_mapping[id][1] = accepted
if has_changed_state:
_update_pending_document_state(cursor, id,
is_license_accepted,
are_roles_accepted)
# Are all the documents ready for publication?
state_lump = set([l and r for l, r in publication_state_mapping.values()])
is_publish_ready = not (False in state_lump) and not (None in state_lump)
change_state = "Done/Success"
if not is_publish_ready:
change_state = "Waiting for acceptance"
# Does this publication need moderation? (ignore on pre-publication)
# TODO Is this a revision publication? If so, it doesn't matter who the
# user is, because they have been vetted by the previous publisher.
# This has loopholes...
if not is_pre_publication and is_publish_ready:
# Has this publisher been moderated before?
cursor.execute("""\
SELECT is_moderated
FROM users AS u LEFT JOIN publications AS p ON (u.username = p.publisher)
WHERE p.id = %s""",
(publication_id,))
try:
is_publisher_moderated = cursor.fetchone()[0]
except TypeError:
is_publisher_moderated = False
# Are any of these documents a revision? Thus vetting of
# the publisher was done by a vetted peer.
if not is_publisher_moderated \
and not is_revision_publication(publication_id, cursor):
# Hold up! This publish needs moderation.
change_state = "Waiting for moderation"
is_publish_ready = False
# Publish the pending documents.
if is_publish_ready:
change_state = "Done/Success"
if not is_pre_publication:
publication_state = publish_pending(cursor, publication_id)
else:
cursor.execute("""\
UPDATE publications
SET state = %s
WHERE id = %s
RETURNING state, state_messages""", (change_state, publication_id,))
publication_state, messages = cursor.fetchone()
else:
# `change_state` set prior to this...
cursor.execute("""\
UPDATE publications
SET state = %s
WHERE id = %s
RETURNING state, state_messages""", (change_state, publication_id,))
publication_state, messages = cursor.fetchone()
return publication_state, messages |
def handle_offchain_secretreveal(
initiator_state: InitiatorTransferState,
state_change: ReceiveSecretReveal,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
""" Once the next hop proves it knows the secret, the initiator can unlock
the mediated transfer.
This will validate the secret, and if valid a new balance proof is sent to
the next hop with the current lock removed from the merkle tree and the
transferred amount updated.
"""
iteration: TransitionResult[InitiatorTransferState]
valid_reveal = is_valid_secret_reveal(
state_change=state_change,
transfer_secrethash=initiator_state.transfer_description.secrethash,
secret=state_change.secret,
)
sent_by_partner = state_change.sender == channel_state.partner_state.address
is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED
if valid_reveal and is_channel_open and sent_by_partner:
events = events_for_unlock_lock(
initiator_state=initiator_state,
channel_state=channel_state,
secret=state_change.secret,
secrethash=state_change.secrethash,
pseudo_random_generator=pseudo_random_generator,
)
iteration = TransitionResult(None, events)
else:
events = list()
iteration = TransitionResult(initiator_state, events)
return iteration | Once the next hop proves it knows the secret, the initiator can unlock
the mediated transfer.
This will validate the secret, and if valid a new balance proof is sent to
the next hop with the current lock removed from the merkle tree and the
transferred amount updated. | Below is the the instruction that describes the task:
### Input:
Once the next hop proves it knows the secret, the initiator can unlock
the mediated transfer.
This will validate the secret, and if valid a new balance proof is sent to
the next hop with the current lock removed from the merkle tree and the
transferred amount updated.
### Response:
def handle_offchain_secretreveal(
initiator_state: InitiatorTransferState,
state_change: ReceiveSecretReveal,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
""" Once the next hop proves it knows the secret, the initiator can unlock
the mediated transfer.
This will validate the secret, and if valid a new balance proof is sent to
the next hop with the current lock removed from the merkle tree and the
transferred amount updated.
"""
iteration: TransitionResult[InitiatorTransferState]
valid_reveal = is_valid_secret_reveal(
state_change=state_change,
transfer_secrethash=initiator_state.transfer_description.secrethash,
secret=state_change.secret,
)
sent_by_partner = state_change.sender == channel_state.partner_state.address
is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED
if valid_reveal and is_channel_open and sent_by_partner:
events = events_for_unlock_lock(
initiator_state=initiator_state,
channel_state=channel_state,
secret=state_change.secret,
secrethash=state_change.secrethash,
pseudo_random_generator=pseudo_random_generator,
)
iteration = TransitionResult(None, events)
else:
events = list()
iteration = TransitionResult(initiator_state, events)
return iteration |
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application.
if add_sc_to_tip is True, the shortcut is added to the
action's tooltip
"""
self.main.register_shortcut(qaction_or_qshortcut, context,
name, add_sc_to_tip) | Register QAction or QShortcut to Spyder main application.
if add_sc_to_tip is True, the shortcut is added to the
action's tooltip | Below is the the instruction that describes the task:
### Input:
Register QAction or QShortcut to Spyder main application.
if add_sc_to_tip is True, the shortcut is added to the
action's tooltip
### Response:
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application.
if add_sc_to_tip is True, the shortcut is added to the
action's tooltip
"""
self.main.register_shortcut(qaction_or_qshortcut, context,
name, add_sc_to_tip) |
def addDependency(self, item):
"""
Creates a dependency for this item to the next item. This item will
be treated as the source, the other as the target.
:param item | <QGanttWidgetItem>
"""
if item in self._dependencies:
return
viewItem = XGanttDepItem(self, item)
self._dependencies[item] = viewItem
item._reverseDependencies[self] = viewItem
self.syncDependencies() | Creates a dependency for this item to the next item. This item will
be treated as the source, the other as the target.
:param item | <QGanttWidgetItem> | Below is the the instruction that describes the task:
### Input:
Creates a dependency for this item to the next item. This item will
be treated as the source, the other as the target.
:param item | <QGanttWidgetItem>
### Response:
def addDependency(self, item):
"""
Creates a dependency for this item to the next item. This item will
be treated as the source, the other as the target.
:param item | <QGanttWidgetItem>
"""
if item in self._dependencies:
return
viewItem = XGanttDepItem(self, item)
self._dependencies[item] = viewItem
item._reverseDependencies[self] = viewItem
self.syncDependencies() |
def validate_required(self, value):
''' Validates the given value agains this field's 'required' property
'''
if self.required and (value is None or value==''):
raise MissingFieldError(self.name) | Validates the given value agains this field's 'required' property | Below is the the instruction that describes the task:
### Input:
Validates the given value agains this field's 'required' property
### Response:
def validate_required(self, value):
''' Validates the given value agains this field's 'required' property
'''
if self.required and (value is None or value==''):
raise MissingFieldError(self.name) |
def compareAaReads(read1, read2, gapChars='-', offsets=None):
"""
Compare two amino acid sequences.
@param read1: A C{Read} instance or an instance of one of its subclasses.
@param read2: A C{Read} instance or an instance of one of its subclasses.
@param gapChars: An object supporting __contains__ with characters that
should be considered to be gaps.
@param offsets: If not C{None}, a C{set} of offsets of interest. Offsets
not in the set will not be considered.
@return: A C{dict} with information about the match and the individual
sequences (see below).
"""
matchCount = 0
gapMismatchCount = nonGapMismatchCount = gapGapMismatchCount = 0
read1ExtraCount = read2ExtraCount = 0
read1GapOffsets = []
read2GapOffsets = []
for offset, (a, b) in enumerate(zip_longest(read1.sequence.upper(),
read2.sequence.upper())):
# Use 'is not None' in the following to allow an empty offsets set
# to be passed.
if offsets is not None and offset not in offsets:
continue
if a is None:
# b has an extra character at its end (it cannot be None).
assert b is not None
read2ExtraCount += 1
if b in gapChars:
read2GapOffsets.append(offset)
elif b is None:
# a has an extra character at its end.
read1ExtraCount += 1
if a in gapChars:
read1GapOffsets.append(offset)
else:
# We have a character from both sequences (they could still be
# gap characters).
if a in gapChars:
read1GapOffsets.append(offset)
if b in gapChars:
# Both are gaps. This can happen (though hopefully not
# if the sequences were pairwise aligned).
gapGapMismatchCount += 1
read2GapOffsets.append(offset)
else:
# a is a gap, b is not.
gapMismatchCount += 1
else:
if b in gapChars:
# b is a gap, a is not.
gapMismatchCount += 1
read2GapOffsets.append(offset)
else:
# Neither is a gap character.
if a == b:
matchCount += 1
else:
nonGapMismatchCount += 1
return {
'match': {
'matchCount': matchCount,
'gapMismatchCount': gapMismatchCount,
'gapGapMismatchCount': gapGapMismatchCount,
'nonGapMismatchCount': nonGapMismatchCount,
},
'read1': {
'extraCount': read1ExtraCount,
'gapOffsets': read1GapOffsets,
},
'read2': {
'extraCount': read2ExtraCount,
'gapOffsets': read2GapOffsets,
},
} | Compare two amino acid sequences.
@param read1: A C{Read} instance or an instance of one of its subclasses.
@param read2: A C{Read} instance or an instance of one of its subclasses.
@param gapChars: An object supporting __contains__ with characters that
should be considered to be gaps.
@param offsets: If not C{None}, a C{set} of offsets of interest. Offsets
not in the set will not be considered.
@return: A C{dict} with information about the match and the individual
sequences (see below). | Below is the the instruction that describes the task:
### Input:
Compare two amino acid sequences.
@param read1: A C{Read} instance or an instance of one of its subclasses.
@param read2: A C{Read} instance or an instance of one of its subclasses.
@param gapChars: An object supporting __contains__ with characters that
should be considered to be gaps.
@param offsets: If not C{None}, a C{set} of offsets of interest. Offsets
not in the set will not be considered.
@return: A C{dict} with information about the match and the individual
sequences (see below).
### Response:
def compareAaReads(read1, read2, gapChars='-', offsets=None):
"""
Compare two amino acid sequences.
@param read1: A C{Read} instance or an instance of one of its subclasses.
@param read2: A C{Read} instance or an instance of one of its subclasses.
@param gapChars: An object supporting __contains__ with characters that
should be considered to be gaps.
@param offsets: If not C{None}, a C{set} of offsets of interest. Offsets
not in the set will not be considered.
@return: A C{dict} with information about the match and the individual
sequences (see below).
"""
matchCount = 0
gapMismatchCount = nonGapMismatchCount = gapGapMismatchCount = 0
read1ExtraCount = read2ExtraCount = 0
read1GapOffsets = []
read2GapOffsets = []
for offset, (a, b) in enumerate(zip_longest(read1.sequence.upper(),
read2.sequence.upper())):
# Use 'is not None' in the following to allow an empty offsets set
# to be passed.
if offsets is not None and offset not in offsets:
continue
if a is None:
# b has an extra character at its end (it cannot be None).
assert b is not None
read2ExtraCount += 1
if b in gapChars:
read2GapOffsets.append(offset)
elif b is None:
# a has an extra character at its end.
read1ExtraCount += 1
if a in gapChars:
read1GapOffsets.append(offset)
else:
# We have a character from both sequences (they could still be
# gap characters).
if a in gapChars:
read1GapOffsets.append(offset)
if b in gapChars:
# Both are gaps. This can happen (though hopefully not
# if the sequences were pairwise aligned).
gapGapMismatchCount += 1
read2GapOffsets.append(offset)
else:
# a is a gap, b is not.
gapMismatchCount += 1
else:
if b in gapChars:
# b is a gap, a is not.
gapMismatchCount += 1
read2GapOffsets.append(offset)
else:
# Neither is a gap character.
if a == b:
matchCount += 1
else:
nonGapMismatchCount += 1
return {
'match': {
'matchCount': matchCount,
'gapMismatchCount': gapMismatchCount,
'gapGapMismatchCount': gapGapMismatchCount,
'nonGapMismatchCount': nonGapMismatchCount,
},
'read1': {
'extraCount': read1ExtraCount,
'gapOffsets': read1GapOffsets,
},
'read2': {
'extraCount': read2ExtraCount,
'gapOffsets': read2GapOffsets,
},
} |
async def sort(self, name, start=None, num=None, by=None, get=None,
desc=False, alpha=False, store=None, groups=False):
"""
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where int he key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = [name]
if by is not None:
pieces.append(b('BY'))
pieces.append(by)
if start is not None and num is not None:
pieces.append(b('LIMIT'))
pieces.append(start)
pieces.append(num)
if get is not None:
# If get is a string assume we want to get a single value.
# Otherwise assume it's an interable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, str):
pieces.append(b('GET'))
pieces.append(get)
else:
for g in get:
pieces.append(b('GET'))
pieces.append(g)
if desc:
pieces.append(b('DESC'))
if alpha:
pieces.append(b('ALPHA'))
if store is not None:
pieces.append(b('STORE'))
pieces.append(store)
if groups:
if not get or isinstance(get, str) or len(get) < 2:
raise DataError('when using "groups" the "get" argument '
'must be specified and contain at least '
'two keys')
options = {'groups': len(get) if groups else None}
return await self.execute_command('SORT', *pieces, **options) | Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where int he key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``. | Below is the the instruction that describes the task:
### Input:
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where int he key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
### Response:
async def sort(self, name, start=None, num=None, by=None, get=None,
desc=False, alpha=False, store=None, groups=False):
"""
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where int he key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = [name]
if by is not None:
pieces.append(b('BY'))
pieces.append(by)
if start is not None and num is not None:
pieces.append(b('LIMIT'))
pieces.append(start)
pieces.append(num)
if get is not None:
# If get is a string assume we want to get a single value.
# Otherwise assume it's an interable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, str):
pieces.append(b('GET'))
pieces.append(get)
else:
for g in get:
pieces.append(b('GET'))
pieces.append(g)
if desc:
pieces.append(b('DESC'))
if alpha:
pieces.append(b('ALPHA'))
if store is not None:
pieces.append(b('STORE'))
pieces.append(store)
if groups:
if not get or isinstance(get, str) or len(get) < 2:
raise DataError('when using "groups" the "get" argument '
'must be specified and contain at least '
'two keys')
options = {'groups': len(get) if groups else None}
return await self.execute_command('SORT', *pieces, **options) |
def _pad(string, size):
"""
'Pad' a string with leading zeroes to fit the given size, truncating
if necessary.
"""
strlen = len(string)
if strlen == size:
return string
if strlen < size:
return _padding[0:size-strlen] + string
return string[-size:] | 'Pad' a string with leading zeroes to fit the given size, truncating
if necessary. | Below is the the instruction that describes the task:
### Input:
'Pad' a string with leading zeroes to fit the given size, truncating
if necessary.
### Response:
def _pad(string, size):
"""
'Pad' a string with leading zeroes to fit the given size, truncating
if necessary.
"""
strlen = len(string)
if strlen == size:
return string
if strlen < size:
return _padding[0:size-strlen] + string
return string[-size:] |
def difference_update(self, *others):
r"""Remove all elements contained the others from this multiset.
>>> ms = Multiset('aab')
>>> ms.difference_update('abc')
>>> sorted(ms)
['a']
You can also use the ``-=`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms -= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`difference`.
Args:
others: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
for other in map(self._as_multiset, others):
for element, multiplicity in other.items():
self.discard(element, multiplicity) | r"""Remove all elements contained the others from this multiset.
>>> ms = Multiset('aab')
>>> ms.difference_update('abc')
>>> sorted(ms)
['a']
You can also use the ``-=`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms -= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`difference`.
Args:
others: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. | Below is the the instruction that describes the task:
### Input:
r"""Remove all elements contained the others from this multiset.
>>> ms = Multiset('aab')
>>> ms.difference_update('abc')
>>> sorted(ms)
['a']
You can also use the ``-=`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms -= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`difference`.
Args:
others: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
### Response:
def difference_update(self, *others):
r"""Remove all elements contained the others from this multiset.
>>> ms = Multiset('aab')
>>> ms.difference_update('abc')
>>> sorted(ms)
['a']
You can also use the ``-=`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms -= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`difference`.
Args:
others: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
for other in map(self._as_multiset, others):
for element, multiplicity in other.items():
self.discard(element, multiplicity) |
def load_path_with_default(self, path, default_constructor):
'''
Same as `load_path(path)', except uses default_constructor on import
errors, or if loaded a auto-generated namespace package (e.g. bare
directory).
'''
try:
imported_obj = self.load_path(path)
except (ImportError, ConfigurationError):
imported_obj = default_constructor(path)
else:
# Ugly but seemingly expedient way to check a module was an
# namespace type module
if (isinstance(imported_obj, ModuleType) and
imported_obj.__spec__.origin == 'namespace'):
imported_obj = default_constructor(path)
return imported_obj | Same as `load_path(path)', except uses default_constructor on import
errors, or if loaded a auto-generated namespace package (e.g. bare
directory). | Below is the the instruction that describes the task:
### Input:
Same as `load_path(path)', except uses default_constructor on import
errors, or if loaded a auto-generated namespace package (e.g. bare
directory).
### Response:
def load_path_with_default(self, path, default_constructor):
'''
Same as `load_path(path)', except uses default_constructor on import
errors, or if loaded a auto-generated namespace package (e.g. bare
directory).
'''
try:
imported_obj = self.load_path(path)
except (ImportError, ConfigurationError):
imported_obj = default_constructor(path)
else:
# Ugly but seemingly expedient way to check a module was an
# namespace type module
if (isinstance(imported_obj, ModuleType) and
imported_obj.__spec__.origin == 'namespace'):
imported_obj = default_constructor(path)
return imported_obj |
def _water(cls, T, P):
"""Get properties of pure water, Table4 pag 8"""
water = IAPWS95(P=P, T=T)
prop = {}
prop["g"] = water.h-T*water.s
prop["gt"] = -water.s
prop["gp"] = 1./water.rho
prop["gtt"] = -water.cp/T
prop["gtp"] = water.betas*water.cp/T
prop["gpp"] = -1e6/(water.rho*water.w)**2-water.betas**2*1e3*water.cp/T
prop["gs"] = 0
prop["gsp"] = 0
prop["thcond"] = water.k
return prop | Get properties of pure water, Table4 pag 8 | Below is the the instruction that describes the task:
### Input:
Get properties of pure water, Table4 pag 8
### Response:
def _water(cls, T, P):
"""Get properties of pure water, Table4 pag 8"""
water = IAPWS95(P=P, T=T)
prop = {}
prop["g"] = water.h-T*water.s
prop["gt"] = -water.s
prop["gp"] = 1./water.rho
prop["gtt"] = -water.cp/T
prop["gtp"] = water.betas*water.cp/T
prop["gpp"] = -1e6/(water.rho*water.w)**2-water.betas**2*1e3*water.cp/T
prop["gs"] = 0
prop["gsp"] = 0
prop["thcond"] = water.k
return prop |
def network_create(auth=None, **kwargs):
'''
Create a network
name
Name of the network being created
shared : False
If ``True``, set the network as shared
admin_state_up : True
If ``True``, Set the network administrative state to "up"
external : False
Control whether or not this network is externally accessible
provider
An optional Python dictionary of network provider options
project_id
The project ID on which this network will be created
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_create name=network2 \
shared=True admin_state_up=True external=True
salt '*' neutronng.network_create name=network3 \
provider='{"network_type": "vlan",\
"segmentation_id": "4010",\
"physical_network": "provider"}' \
project_id=1dcac318a83b4610b7a7f7ba01465548
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_network(**kwargs) | Create a network
name
Name of the network being created
shared : False
If ``True``, set the network as shared
admin_state_up : True
If ``True``, Set the network administrative state to "up"
external : False
Control whether or not this network is externally accessible
provider
An optional Python dictionary of network provider options
project_id
The project ID on which this network will be created
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_create name=network2 \
shared=True admin_state_up=True external=True
salt '*' neutronng.network_create name=network3 \
provider='{"network_type": "vlan",\
"segmentation_id": "4010",\
"physical_network": "provider"}' \
project_id=1dcac318a83b4610b7a7f7ba01465548 | Below is the the instruction that describes the task:
### Input:
Create a network
name
Name of the network being created
shared : False
If ``True``, set the network as shared
admin_state_up : True
If ``True``, Set the network administrative state to "up"
external : False
Control whether or not this network is externally accessible
provider
An optional Python dictionary of network provider options
project_id
The project ID on which this network will be created
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_create name=network2 \
shared=True admin_state_up=True external=True
salt '*' neutronng.network_create name=network3 \
provider='{"network_type": "vlan",\
"segmentation_id": "4010",\
"physical_network": "provider"}' \
project_id=1dcac318a83b4610b7a7f7ba01465548
### Response:
def network_create(auth=None, **kwargs):
'''
Create a network
name
Name of the network being created
shared : False
If ``True``, set the network as shared
admin_state_up : True
If ``True``, Set the network administrative state to "up"
external : False
Control whether or not this network is externally accessible
provider
An optional Python dictionary of network provider options
project_id
The project ID on which this network will be created
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_create name=network2 \
shared=True admin_state_up=True external=True
salt '*' neutronng.network_create name=network3 \
provider='{"network_type": "vlan",\
"segmentation_id": "4010",\
"physical_network": "provider"}' \
project_id=1dcac318a83b4610b7a7f7ba01465548
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_network(**kwargs) |
def from_str(cls, string):
"""
Creates a literal from a string
Parameters
----------
string : str
If the string starts with '!', it's interpreted as a negated variable
Returns
-------
caspo.core.literal.Literal
Created object instance
"""
if string[0] == '!':
signature = -1
variable = string[1:]
else:
signature = 1
variable = string
return cls(variable, signature) | Creates a literal from a string
Parameters
----------
string : str
If the string starts with '!', it's interpreted as a negated variable
Returns
-------
caspo.core.literal.Literal
Created object instance | Below is the the instruction that describes the task:
### Input:
Creates a literal from a string
Parameters
----------
string : str
If the string starts with '!', it's interpreted as a negated variable
Returns
-------
caspo.core.literal.Literal
Created object instance
### Response:
def from_str(cls, string):
"""
Creates a literal from a string
Parameters
----------
string : str
If the string starts with '!', it's interpreted as a negated variable
Returns
-------
caspo.core.literal.Literal
Created object instance
"""
if string[0] == '!':
signature = -1
variable = string[1:]
else:
signature = 1
variable = string
return cls(variable, signature) |
def get_context(self, language):
""" Get the context(description) of this task """
context = self.gettext(language, self._context) if self._context else ""
vals = self._hook_manager.call_hook('task_context', course=self.get_course(), task=self, default=context)
return ParsableText(vals[0], "rst", self._translations.get(language, gettext.NullTranslations())) if len(vals) \
else ParsableText(context, "rst", self._translations.get(language, gettext.NullTranslations())) | Get the context(description) of this task | Below is the the instruction that describes the task:
### Input:
Get the context(description) of this task
### Response:
def get_context(self, language):
""" Get the context(description) of this task """
context = self.gettext(language, self._context) if self._context else ""
vals = self._hook_manager.call_hook('task_context', course=self.get_course(), task=self, default=context)
return ParsableText(vals[0], "rst", self._translations.get(language, gettext.NullTranslations())) if len(vals) \
else ParsableText(context, "rst", self._translations.get(language, gettext.NullTranslations())) |
def _build_regular_workflow(json_spec):
"""
Precondition: json_spec must be validated
"""
workflow_id = dxpy.api.workflow_new(json_spec)["id"]
dxpy.api.workflow_close(workflow_id)
return workflow_id | Precondition: json_spec must be validated | Below is the the instruction that describes the task:
### Input:
Precondition: json_spec must be validated
### Response:
def _build_regular_workflow(json_spec):
"""
Precondition: json_spec must be validated
"""
workflow_id = dxpy.api.workflow_new(json_spec)["id"]
dxpy.api.workflow_close(workflow_id)
return workflow_id |
def _remove_white_background(image):
"""Remove white background in the preview image."""
from PIL import ImageMath, Image
if image.mode == "RGBA":
bands = image.split()
a = bands[3]
rgb = [
ImageMath.eval(
'convert('
'float(x + a - 255) * 255.0 / float(max(a, 1)) * '
'float(min(a, 1)) + float(x) * float(1 - min(a, 1))'
', "L")',
x=x, a=a
)
for x in bands[:3]
]
return Image.merge(bands=rgb + [a], mode="RGBA")
return image | Remove white background in the preview image. | Below is the the instruction that describes the task:
### Input:
Remove white background in the preview image.
### Response:
def _remove_white_background(image):
"""Remove white background in the preview image."""
from PIL import ImageMath, Image
if image.mode == "RGBA":
bands = image.split()
a = bands[3]
rgb = [
ImageMath.eval(
'convert('
'float(x + a - 255) * 255.0 / float(max(a, 1)) * '
'float(min(a, 1)) + float(x) * float(1 - min(a, 1))'
', "L")',
x=x, a=a
)
for x in bands[:3]
]
return Image.merge(bands=rgb + [a], mode="RGBA")
return image |
def xywh_from_points(points):
"""
Constructs an dict representing a rectangle with keys x, y, w, h
"""
xys = [[int(p) for p in pair.split(',')] for pair in points.split(' ')]
minx = sys.maxsize
miny = sys.maxsize
maxx = 0
maxy = 0
for xy in xys:
if xy[0] < minx:
minx = xy[0]
if xy[0] > maxx:
maxx = xy[0]
if xy[1] < miny:
miny = xy[1]
if xy[1] > maxy:
maxy = xy[1]
return {
'x': minx,
'y': miny,
'w': maxx - minx,
'h': maxy - miny,
} | Constructs an dict representing a rectangle with keys x, y, w, h | Below is the the instruction that describes the task:
### Input:
Constructs an dict representing a rectangle with keys x, y, w, h
### Response:
def xywh_from_points(points):
"""
Constructs an dict representing a rectangle with keys x, y, w, h
"""
xys = [[int(p) for p in pair.split(',')] for pair in points.split(' ')]
minx = sys.maxsize
miny = sys.maxsize
maxx = 0
maxy = 0
for xy in xys:
if xy[0] < minx:
minx = xy[0]
if xy[0] > maxx:
maxx = xy[0]
if xy[1] < miny:
miny = xy[1]
if xy[1] > maxy:
maxy = xy[1]
return {
'x': minx,
'y': miny,
'w': maxx - minx,
'h': maxy - miny,
} |
def fitLine(points, c="orange", lw=1):
"""
Fits a line through points.
Extra info is stored in ``actor.info['slope']``, ``actor.info['center']``, ``actor.info['variances']``.
.. hint:: |fitline| |fitline.py|_
"""
data = np.array(points)
datamean = data.mean(axis=0)
uu, dd, vv = np.linalg.svd(data - datamean)
vv = vv[0] / np.linalg.norm(vv[0])
# vv contains the first principal component, i.e. the direction
# vector of the best fit line in the least squares sense.
xyz_min = points.min(axis=0)
xyz_max = points.max(axis=0)
a = np.linalg.norm(xyz_min - datamean)
b = np.linalg.norm(xyz_max - datamean)
p1 = datamean - a * vv
p2 = datamean + b * vv
l = vs.Line(p1, p2, c=c, lw=lw, alpha=1)
l.info["slope"] = vv
l.info["center"] = datamean
l.info["variances"] = dd
return l | Fits a line through points.
Extra info is stored in ``actor.info['slope']``, ``actor.info['center']``, ``actor.info['variances']``.
.. hint:: |fitline| |fitline.py|_ | Below is the the instruction that describes the task:
### Input:
Fits a line through points.
Extra info is stored in ``actor.info['slope']``, ``actor.info['center']``, ``actor.info['variances']``.
.. hint:: |fitline| |fitline.py|_
### Response:
def fitLine(points, c="orange", lw=1):
"""
Fits a line through points.
Extra info is stored in ``actor.info['slope']``, ``actor.info['center']``, ``actor.info['variances']``.
.. hint:: |fitline| |fitline.py|_
"""
data = np.array(points)
datamean = data.mean(axis=0)
uu, dd, vv = np.linalg.svd(data - datamean)
vv = vv[0] / np.linalg.norm(vv[0])
# vv contains the first principal component, i.e. the direction
# vector of the best fit line in the least squares sense.
xyz_min = points.min(axis=0)
xyz_max = points.max(axis=0)
a = np.linalg.norm(xyz_min - datamean)
b = np.linalg.norm(xyz_max - datamean)
p1 = datamean - a * vv
p2 = datamean + b * vv
l = vs.Line(p1, p2, c=c, lw=lw, alpha=1)
l.info["slope"] = vv
l.info["center"] = datamean
l.info["variances"] = dd
return l |
def get_tokens(self):
"""Returns a generator of the tokens."""
if self._filename:
with open(self._filename) as ifile:
self._data = ifile.read()
with QasmParser(self._filename) as qasm_p:
return qasm_p.get_tokens() | Returns a generator of the tokens. | Below is the the instruction that describes the task:
### Input:
Returns a generator of the tokens.
### Response:
def get_tokens(self):
"""Returns a generator of the tokens."""
if self._filename:
with open(self._filename) as ifile:
self._data = ifile.read()
with QasmParser(self._filename) as qasm_p:
return qasm_p.get_tokens() |
def to_blob(self, repo):
""":return: Blob using the information of this index entry"""
return Blob(repo, self.binsha, self.mode, self.path) | :return: Blob using the information of this index entry | Below is the the instruction that describes the task:
### Input:
:return: Blob using the information of this index entry
### Response:
def to_blob(self, repo):
""":return: Blob using the information of this index entry"""
return Blob(repo, self.binsha, self.mode, self.path) |
def itemFromIndex(self, index):
""" Gets the item given the model index
"""
sourceIndex = self.mapToSource(index)
return self.sourceModel().itemFromIndex(sourceIndex) | Gets the item given the model index | Below is the the instruction that describes the task:
### Input:
Gets the item given the model index
### Response:
def itemFromIndex(self, index):
""" Gets the item given the model index
"""
sourceIndex = self.mapToSource(index)
return self.sourceModel().itemFromIndex(sourceIndex) |
def parse_variables(args):
"""
Parse variables as passed on the command line.
Returns
-------
dict
Mapping variable name to the value.
"""
if args is None:
return {}
def parse_variable(string):
tokens = string.split('=')
name = tokens[0]
value = '='.join(tokens[1:])
return name, value
return {
name: value
for name, value in (parse_variable(v) for v in args)
} | Parse variables as passed on the command line.
Returns
-------
dict
Mapping variable name to the value. | Below is the the instruction that describes the task:
### Input:
Parse variables as passed on the command line.
Returns
-------
dict
Mapping variable name to the value.
### Response:
def parse_variables(args):
"""
Parse variables as passed on the command line.
Returns
-------
dict
Mapping variable name to the value.
"""
if args is None:
return {}
def parse_variable(string):
tokens = string.split('=')
name = tokens[0]
value = '='.join(tokens[1:])
return name, value
return {
name: value
for name, value in (parse_variable(v) for v in args)
} |
def send(*args, **kwargs):
""" Make sure that we have an instance of the GraphiteClient.
Then send the metrics to the graphite server.
User consumable method.
"""
if not _module_instance:
raise GraphiteSendException(
"Must call graphitesend.init() before sending")
_module_instance.send(*args, **kwargs)
return _module_instance | Make sure that we have an instance of the GraphiteClient.
Then send the metrics to the graphite server.
User consumable method. | Below is the the instruction that describes the task:
### Input:
Make sure that we have an instance of the GraphiteClient.
Then send the metrics to the graphite server.
User consumable method.
### Response:
def send(*args, **kwargs):
""" Make sure that we have an instance of the GraphiteClient.
Then send the metrics to the graphite server.
User consumable method.
"""
if not _module_instance:
raise GraphiteSendException(
"Must call graphitesend.init() before sending")
_module_instance.send(*args, **kwargs)
return _module_instance |
def _submit(
self,
metric_name,
message,
send_histograms_buckets=True,
send_monotonic_counter=False,
custom_tags=None,
hostname=None,
):
"""
For each metric in the message, report it as a gauge with all labels as tags
except if a labels dict is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
send_histograms_buckets is used to specify if yes or no you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog.
"""
if message.type < len(self.METRIC_TYPES):
for metric in message.metric:
custom_hostname = self._get_hostname(hostname, metric)
if message.type == 0:
val = getattr(metric, self.METRIC_TYPES[message.type]).value
if self._is_value_valid(val):
if send_monotonic_counter:
self._submit_monotonic_count(metric_name, val, metric, custom_tags, custom_hostname)
else:
self._submit_gauge(metric_name, val, metric, custom_tags, custom_hostname)
else:
self.log.debug("Metric value is not supported for metric {}.".format(metric_name))
elif message.type == 4:
self._submit_gauges_from_histogram(
metric_name, metric, send_histograms_buckets, custom_tags, custom_hostname
)
elif message.type == 2:
self._submit_gauges_from_summary(metric_name, metric, custom_tags, custom_hostname)
else:
val = getattr(metric, self.METRIC_TYPES[message.type]).value
if self._is_value_valid(val):
if message.name in self.rate_metrics:
self._submit_rate(metric_name, val, metric, custom_tags, custom_hostname)
else:
self._submit_gauge(metric_name, val, metric, custom_tags, custom_hostname)
else:
self.log.debug("Metric value is not supported for metric {}.".format(metric_name))
else:
self.log.error("Metric type {} unsupported for metric {}.".format(message.type, message.name)) | For each metric in the message, report it as a gauge with all labels as tags
except if a labels dict is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
send_histograms_buckets is used to specify if yes or no you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog. | Below is the the instruction that describes the task:
### Input:
For each metric in the message, report it as a gauge with all labels as tags
except if a labels dict is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
send_histograms_buckets is used to specify if yes or no you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog.
### Response:
def _submit(
self,
metric_name,
message,
send_histograms_buckets=True,
send_monotonic_counter=False,
custom_tags=None,
hostname=None,
):
"""
For each metric in the message, report it as a gauge with all labels as tags
except if a labels dict is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
send_histograms_buckets is used to specify if yes or no you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog.
"""
if message.type < len(self.METRIC_TYPES):
for metric in message.metric:
custom_hostname = self._get_hostname(hostname, metric)
if message.type == 0:
val = getattr(metric, self.METRIC_TYPES[message.type]).value
if self._is_value_valid(val):
if send_monotonic_counter:
self._submit_monotonic_count(metric_name, val, metric, custom_tags, custom_hostname)
else:
self._submit_gauge(metric_name, val, metric, custom_tags, custom_hostname)
else:
self.log.debug("Metric value is not supported for metric {}.".format(metric_name))
elif message.type == 4:
self._submit_gauges_from_histogram(
metric_name, metric, send_histograms_buckets, custom_tags, custom_hostname
)
elif message.type == 2:
self._submit_gauges_from_summary(metric_name, metric, custom_tags, custom_hostname)
else:
val = getattr(metric, self.METRIC_TYPES[message.type]).value
if self._is_value_valid(val):
if message.name in self.rate_metrics:
self._submit_rate(metric_name, val, metric, custom_tags, custom_hostname)
else:
self._submit_gauge(metric_name, val, metric, custom_tags, custom_hostname)
else:
self.log.debug("Metric value is not supported for metric {}.".format(metric_name))
else:
self.log.error("Metric type {} unsupported for metric {}.".format(message.type, message.name)) |
def download(cls, root, check=None):
"""Download and unzip an online archive (.zip, .gz, or .tgz).
Arguments:
root (str): Folder to download data to.
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
str: Path to extracted dataset.
"""
path = os.path.join(root, cls.name)
check = path if check is None else check
if not os.path.isdir(check):
for url in cls.urls:
if isinstance(url, tuple):
url, filename = url
else:
filename = os.path.basename(url)
zpath = os.path.join(path, filename)
if not os.path.isfile(zpath):
if not os.path.exists(os.path.dirname(zpath)):
os.makedirs(os.path.dirname(zpath))
print('downloading {}'.format(filename))
download_from_url(url, zpath)
zroot, ext = os.path.splitext(zpath)
_, ext_inner = os.path.splitext(zroot)
if ext == '.zip':
with zipfile.ZipFile(zpath, 'r') as zfile:
print('extracting')
zfile.extractall(path)
# tarfile cannot handle bare .gz files
elif ext == '.tgz' or ext == '.gz' and ext_inner == '.tar':
with tarfile.open(zpath, 'r:gz') as tar:
dirs = [member for member in tar.getmembers()]
tar.extractall(path=path, members=dirs)
elif ext == '.gz':
with gzip.open(zpath, 'rb') as gz:
with open(zroot, 'wb') as uncompressed:
shutil.copyfileobj(gz, uncompressed)
return os.path.join(path, cls.dirname) | Download and unzip an online archive (.zip, .gz, or .tgz).
Arguments:
root (str): Folder to download data to.
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
str: Path to extracted dataset. | Below is the the instruction that describes the task:
### Input:
Download and unzip an online archive (.zip, .gz, or .tgz).
Arguments:
root (str): Folder to download data to.
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
str: Path to extracted dataset.
### Response:
def download(cls, root, check=None):
"""Download and unzip an online archive (.zip, .gz, or .tgz).
Arguments:
root (str): Folder to download data to.
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
str: Path to extracted dataset.
"""
path = os.path.join(root, cls.name)
check = path if check is None else check
if not os.path.isdir(check):
for url in cls.urls:
if isinstance(url, tuple):
url, filename = url
else:
filename = os.path.basename(url)
zpath = os.path.join(path, filename)
if not os.path.isfile(zpath):
if not os.path.exists(os.path.dirname(zpath)):
os.makedirs(os.path.dirname(zpath))
print('downloading {}'.format(filename))
download_from_url(url, zpath)
zroot, ext = os.path.splitext(zpath)
_, ext_inner = os.path.splitext(zroot)
if ext == '.zip':
with zipfile.ZipFile(zpath, 'r') as zfile:
print('extracting')
zfile.extractall(path)
# tarfile cannot handle bare .gz files
elif ext == '.tgz' or ext == '.gz' and ext_inner == '.tar':
with tarfile.open(zpath, 'r:gz') as tar:
dirs = [member for member in tar.getmembers()]
tar.extractall(path=path, members=dirs)
elif ext == '.gz':
with gzip.open(zpath, 'rb') as gz:
with open(zroot, 'wb') as uncompressed:
shutil.copyfileobj(gz, uncompressed)
return os.path.join(path, cls.dirname) |
def __search(self, obj, item, parent="root", parents_ids=frozenset({})):
"""The main search method"""
if self.__skip_this(item, parent):
return
elif isinstance(obj, strings) and isinstance(item, strings):
self.__search_str(obj, item, parent)
elif isinstance(obj, strings) and isinstance(item, numbers):
return
elif isinstance(obj, numbers):
self.__search_numbers(obj, item, parent)
elif isinstance(obj, MutableMapping):
self.__search_dict(obj, item, parent, parents_ids)
elif isinstance(obj, tuple):
self.__search_tuple(obj, item, parent, parents_ids)
elif isinstance(obj, (set, frozenset)):
if self.warning_num < 10:
logger.warning(
"Set item detected in the path."
"'set' objects do NOT support indexing. But DeepSearch will still report a path."
)
self.warning_num += 1
self.__search_iterable(obj, item, parent, parents_ids)
elif isinstance(obj, Iterable):
self.__search_iterable(obj, item, parent, parents_ids)
else:
self.__search_obj(obj, item, parent, parents_ids) | The main search method | Below is the the instruction that describes the task:
### Input:
The main search method
### Response:
def __search(self, obj, item, parent="root", parents_ids=frozenset({})):
"""The main search method"""
if self.__skip_this(item, parent):
return
elif isinstance(obj, strings) and isinstance(item, strings):
self.__search_str(obj, item, parent)
elif isinstance(obj, strings) and isinstance(item, numbers):
return
elif isinstance(obj, numbers):
self.__search_numbers(obj, item, parent)
elif isinstance(obj, MutableMapping):
self.__search_dict(obj, item, parent, parents_ids)
elif isinstance(obj, tuple):
self.__search_tuple(obj, item, parent, parents_ids)
elif isinstance(obj, (set, frozenset)):
if self.warning_num < 10:
logger.warning(
"Set item detected in the path."
"'set' objects do NOT support indexing. But DeepSearch will still report a path."
)
self.warning_num += 1
self.__search_iterable(obj, item, parent, parents_ids)
elif isinstance(obj, Iterable):
self.__search_iterable(obj, item, parent, parents_ids)
else:
self.__search_obj(obj, item, parent, parents_ids) |
def gather_bootstrap_script(bootstrap=None):
'''
Download the salt-bootstrap script, and return its location
bootstrap
URL of alternate bootstrap script
CLI Example:
.. code-block:: bash
salt '*' config.gather_bootstrap_script
'''
if not HAS_CLOUD:
return False, 'config.gather_bootstrap_script is unavailable'
ret = salt.utils.cloud.update_bootstrap(__opts__, url=bootstrap)
if 'Success' in ret and ret['Success']['Files updated']:
return ret['Success']['Files updated'][0] | Download the salt-bootstrap script, and return its location
bootstrap
URL of alternate bootstrap script
CLI Example:
.. code-block:: bash
salt '*' config.gather_bootstrap_script | Below is the the instruction that describes the task:
### Input:
Download the salt-bootstrap script, and return its location
bootstrap
URL of alternate bootstrap script
CLI Example:
.. code-block:: bash
salt '*' config.gather_bootstrap_script
### Response:
def gather_bootstrap_script(bootstrap=None):
'''
Download the salt-bootstrap script, and return its location
bootstrap
URL of alternate bootstrap script
CLI Example:
.. code-block:: bash
salt '*' config.gather_bootstrap_script
'''
if not HAS_CLOUD:
return False, 'config.gather_bootstrap_script is unavailable'
ret = salt.utils.cloud.update_bootstrap(__opts__, url=bootstrap)
if 'Success' in ret and ret['Success']['Files updated']:
return ret['Success']['Files updated'][0] |
def get_all_quiz_submissions(self, quiz_id, course_id, include=None):
"""
Get all quiz submissions.
Get a list of all submissions for this quiz. Users who can view or manage
grades for a course will have submissions from multiple users returned. A
user who can only submit will have only their own submissions returned. When
a user has an in-progress submission, only that submission is returned. When
there isn't an in-progress quiz_submission, all completed submissions,
including previous attempts, are returned.
<b>200 OK</b> response code is returned if the request was successful.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""ID"""
path["quiz_id"] = quiz_id
# OPTIONAL - include
"""Associations to include with the quiz submission."""
if include is not None:
self._validate_enum(include, ["submission", "quiz", "user"])
params["include"] = include
self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/submissions".format(**path), data=data, params=params, no_data=True) | Get all quiz submissions.
Get a list of all submissions for this quiz. Users who can view or manage
grades for a course will have submissions from multiple users returned. A
user who can only submit will have only their own submissions returned. When
a user has an in-progress submission, only that submission is returned. When
there isn't an in-progress quiz_submission, all completed submissions,
including previous attempts, are returned.
<b>200 OK</b> response code is returned if the request was successful. | Below is the instruction that describes the task:
### Input:
Get all quiz submissions.
Get a list of all submissions for this quiz. Users who can view or manage
grades for a course will have submissions from multiple users returned. A
user who can only submit will have only their own submissions returned. When
a user has an in-progress submission, only that submission is returned. When
there isn't an in-progress quiz_submission, all completed submissions,
including previous attempts, are returned.
<b>200 OK</b> response code is returned if the request was successful.
### Response:
def get_all_quiz_submissions(self, quiz_id, course_id, include=None):
"""
Get all quiz submissions.
Get a list of all submissions for this quiz. Users who can view or manage
grades for a course will have submissions from multiple users returned. A
user who can only submit will have only their own submissions returned. When
a user has an in-progress submission, only that submission is returned. When
there isn't an in-progress quiz_submission, all completed submissions,
including previous attempts, are returned.
<b>200 OK</b> response code is returned if the request was successful.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""ID"""
path["quiz_id"] = quiz_id
# OPTIONAL - include
"""Associations to include with the quiz submission."""
if include is not None:
self._validate_enum(include, ["submission", "quiz", "user"])
params["include"] = include
self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/{quiz_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/submissions".format(**path), data=data, params=params, no_data=True) |
def modify_snapshot(snapshot_id=None,
description=None,
userdata=None,
cleanup=None,
config="root"):
'''
Modify attributes of an existing snapshot.
config
Configuration name. (Default: root)
snapshot_id
ID of the snapshot to be modified.
cleanup
Change the cleanup method of the snapshot. (str)
description
Change the description of the snapshot. (str)
userdata
Change the userdata dictionary of the snapshot. (dict)
CLI example:
.. code-block:: bash
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
'''
if not snapshot_id:
raise CommandExecutionError('Error: No snapshot ID has been provided')
snapshot = get_snapshot(config=config, number=snapshot_id)
try:
# Updating only the explicitly provided attributes by the user
updated_opts = {
'description': description if description is not None else snapshot['description'],
'cleanup': cleanup if cleanup is not None else snapshot['cleanup'],
'userdata': userdata if userdata is not None else snapshot['userdata'],
}
snapper.SetSnapshot(config,
snapshot_id,
updated_opts['description'],
updated_opts['cleanup'],
updated_opts['userdata'])
return get_snapshot(config=config, number=snapshot_id)
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals())) | Modify attributes of an existing snapshot.
config
Configuration name. (Default: root)
snapshot_id
ID of the snapshot to be modified.
cleanup
Change the cleanup method of the snapshot. (str)
description
Change the description of the snapshot. (str)
userdata
Change the userdata dictionary of the snapshot. (dict)
CLI example:
.. code-block:: bash
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number" | Below is the instruction that describes the task:
### Input:
Modify attributes of an existing snapshot.
config
Configuration name. (Default: root)
snapshot_id
ID of the snapshot to be modified.
cleanup
Change the cleanup method of the snapshot. (str)
description
Change the description of the snapshot. (str)
userdata
Change the userdata dictionary of the snapshot. (dict)
CLI example:
.. code-block:: bash
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
### Response:
def modify_snapshot(snapshot_id=None,
description=None,
userdata=None,
cleanup=None,
config="root"):
'''
Modify attributes of an existing snapshot.
config
Configuration name. (Default: root)
snapshot_id
ID of the snapshot to be modified.
cleanup
Change the cleanup method of the snapshot. (str)
description
Change the description of the snapshot. (str)
userdata
Change the userdata dictionary of the snapshot. (dict)
CLI example:
.. code-block:: bash
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
'''
if not snapshot_id:
raise CommandExecutionError('Error: No snapshot ID has been provided')
snapshot = get_snapshot(config=config, number=snapshot_id)
try:
# Updating only the explicitly provided attributes by the user
updated_opts = {
'description': description if description is not None else snapshot['description'],
'cleanup': cleanup if cleanup is not None else snapshot['cleanup'],
'userdata': userdata if userdata is not None else snapshot['userdata'],
}
snapper.SetSnapshot(config,
snapshot_id,
updated_opts['description'],
updated_opts['cleanup'],
updated_opts['userdata'])
return get_snapshot(config=config, number=snapshot_id)
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals())) |
def iterate(self, shuffle=True):
'''Iterate over batches in the dataset.
This method generates ``iteration_size`` batches from the dataset and
then returns.
Parameters
----------
shuffle : bool, optional
Shuffle the batches in this dataset if the iteration reaches the end
of the batch list. Defaults to True.
Yields
------
batches : data batches
A sequence of batches---often from a training, validation, or test
dataset.
'''
for _ in range(self.iteration_size):
if self._callable is not None:
yield self._callable()
else:
yield self._next_batch(shuffle) | Iterate over batches in the dataset.
This method generates ``iteration_size`` batches from the dataset and
then returns.
Parameters
----------
shuffle : bool, optional
Shuffle the batches in this dataset if the iteration reaches the end
of the batch list. Defaults to True.
Yields
------
batches : data batches
A sequence of batches---often from a training, validation, or test
dataset. | Below is the instruction that describes the task:
### Input:
Iterate over batches in the dataset.
This method generates ``iteration_size`` batches from the dataset and
then returns.
Parameters
----------
shuffle : bool, optional
Shuffle the batches in this dataset if the iteration reaches the end
of the batch list. Defaults to True.
Yields
------
batches : data batches
A sequence of batches---often from a training, validation, or test
dataset.
### Response:
def iterate(self, shuffle=True):
'''Iterate over batches in the dataset.
This method generates ``iteration_size`` batches from the dataset and
then returns.
Parameters
----------
shuffle : bool, optional
Shuffle the batches in this dataset if the iteration reaches the end
of the batch list. Defaults to True.
Yields
------
batches : data batches
A sequence of batches---often from a training, validation, or test
dataset.
'''
for _ in range(self.iteration_size):
if self._callable is not None:
yield self._callable()
else:
yield self._next_batch(shuffle) |
def track_from_url(url, timeout=DEFAULT_ASYNC_TIMEOUT):
"""
Create a track object from a public http URL.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
url: A string giving the URL to read from. This must be on a public machine accessible by HTTP.
Example:
>>> t = track.track_from_url("http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3")
>>> t
< Track >
>>>
"""
param_dict = dict(url = url)
return _upload(param_dict, timeout, data=None) | Create a track object from a public http URL.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
url: A string giving the URL to read from. This must be on a public machine accessible by HTTP.
Example:
>>> t = track.track_from_url("http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3")
>>> t
< Track >
>>> | Below is the instruction that describes the task:
### Input:
Create a track object from a public http URL.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
url: A string giving the URL to read from. This must be on a public machine accessible by HTTP.
Example:
>>> t = track.track_from_url("http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3")
>>> t
< Track >
>>>
### Response:
def track_from_url(url, timeout=DEFAULT_ASYNC_TIMEOUT):
"""
Create a track object from a public http URL.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
url: A string giving the URL to read from. This must be on a public machine accessible by HTTP.
Example:
>>> t = track.track_from_url("http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3")
>>> t
< Track >
>>>
"""
param_dict = dict(url = url)
return _upload(param_dict, timeout, data=None) |
def read_identity(self, identity_id, query_membership=None, properties=None):
"""ReadIdentity.
:param str identity_id:
:param str query_membership:
:param str properties:
:rtype: :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>`
"""
route_values = {}
if identity_id is not None:
route_values['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Identity', response) | ReadIdentity.
:param str identity_id:
:param str query_membership:
:param str properties:
:rtype: :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>` | Below is the instruction that describes the task:
### Input:
ReadIdentity.
:param str identity_id:
:param str query_membership:
:param str properties:
:rtype: :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>`
### Response:
def read_identity(self, identity_id, query_membership=None, properties=None):
"""ReadIdentity.
:param str identity_id:
:param str query_membership:
:param str properties:
:rtype: :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>`
"""
route_values = {}
if identity_id is not None:
route_values['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Identity', response) |
def fold_text_stream(self, prefix, postfix="", hidden_stream=None, **kwargs):
"""
:param str prefix: always visible
:param str postfix: always visible, right after.
:param io.TextIOBase|io.StringIO hidden_stream: sys.stdout by default.
If this is sys.stdout, it will replace that stream,
and collect the data during the context (in the `with` block).
"""
import io
if hidden_stream is None:
hidden_stream = sys.stdout
assert isinstance(hidden_stream, io.IOBase)
assert hidden_stream is sys.stdout, "currently not supported otherwise"
hidden_buf = io.StringIO()
with self._temp_replace_attrib(sys, "stdout", hidden_buf):
yield
self.fold_text(prefix=prefix, postfix=postfix, hidden=hidden_buf.getvalue(), **kwargs) | :param str prefix: always visible
:param str postfix: always visible, right after.
:param io.TextIOBase|io.StringIO hidden_stream: sys.stdout by default.
If this is sys.stdout, it will replace that stream,
and collect the data during the context (in the `with` block). | Below is the instruction that describes the task:
### Input:
:param str prefix: always visible
:param str postfix: always visible, right after.
:param io.TextIOBase|io.StringIO hidden_stream: sys.stdout by default.
If this is sys.stdout, it will replace that stream,
and collect the data during the context (in the `with` block).
### Response:
def fold_text_stream(self, prefix, postfix="", hidden_stream=None, **kwargs):
"""
:param str prefix: always visible
:param str postfix: always visible, right after.
:param io.TextIOBase|io.StringIO hidden_stream: sys.stdout by default.
If this is sys.stdout, it will replace that stream,
and collect the data during the context (in the `with` block).
"""
import io
if hidden_stream is None:
hidden_stream = sys.stdout
assert isinstance(hidden_stream, io.IOBase)
assert hidden_stream is sys.stdout, "currently not supported otherwise"
hidden_buf = io.StringIO()
with self._temp_replace_attrib(sys, "stdout", hidden_buf):
yield
self.fold_text(prefix=prefix, postfix=postfix, hidden=hidden_buf.getvalue(), **kwargs) |
def hue(self, hue):
""" Set the group hue.
:param hue: Hue in decimal percent (0.0-1.0).
"""
if hue < 0 or hue > 1:
raise ValueError("Hue must be a percentage "
"represented as decimal 0-1.0")
self._hue = hue
cmd = self.command_set.hue(hue)
self.send(cmd) | Set the group hue.
:param hue: Hue in decimal percent (0.0-1.0). | Below is the the instruction that describes the task:
### Input:
Set the group hue.
:param hue: Hue in decimal percent (0.0-1.0).
### Response:
def hue(self, hue):
""" Set the group hue.
:param hue: Hue in decimal percent (0.0-1.0).
"""
if hue < 0 or hue > 1:
raise ValueError("Hue must be a percentage "
"represented as decimal 0-1.0")
self._hue = hue
cmd = self.command_set.hue(hue)
self.send(cmd) |
def getRetinas(self, retina_name=None):
"""Information about retinas
Args:
retina_name, str: The retina name (optional) (optional)
Returns: Array[Retina]
"""
resourcePath = '/retinas'
method = 'GET'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [retina.Retina(**r) for r in response.json()] | Information about retinas
Args:
retina_name, str: The retina name (optional) (optional)
Returns: Array[Retina] | Below is the instruction that describes the task:
### Input:
Information about retinas
Args:
retina_name, str: The retina name (optional) (optional)
Returns: Array[Retina]
### Response:
def getRetinas(self, retina_name=None):
"""Information about retinas
Args:
retina_name, str: The retina name (optional) (optional)
Returns: Array[Retina]
"""
resourcePath = '/retinas'
method = 'GET'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [retina.Retina(**r) for r in response.json()] |
async def jsk_repeat(self, ctx: commands.Context, times: int, *, command_string: str):
"""
Runs a command multiple times in a row.
This acts like the command was invoked several times manually, so it obeys cooldowns.
"""
with self.submit(ctx): # allow repeats to be cancelled
for _ in range(times):
alt_ctx = await copy_context_with(ctx, content=ctx.prefix + command_string)
if alt_ctx.command is None:
return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')
await alt_ctx.command.reinvoke(alt_ctx) | Runs a command multiple times in a row.
This acts like the command was invoked several times manually, so it obeys cooldowns. | Below is the instruction that describes the task:
### Input:
Runs a command multiple times in a row.
This acts like the command was invoked several times manually, so it obeys cooldowns.
### Response:
async def jsk_repeat(self, ctx: commands.Context, times: int, *, command_string: str):
"""
Runs a command multiple times in a row.
This acts like the command was invoked several times manually, so it obeys cooldowns.
"""
with self.submit(ctx): # allow repeats to be cancelled
for _ in range(times):
alt_ctx = await copy_context_with(ctx, content=ctx.prefix + command_string)
if alt_ctx.command is None:
return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')
await alt_ctx.command.reinvoke(alt_ctx) |
def _update_response_location_header(self, resource):
"""
Adds a new or replaces an existing Location header to the response
headers pointing to the URL of the given resource.
"""
location = resource_to_url(resource, request=self.request)
loc_hdr = ('Location', location)
hdr_names = [hdr[0].upper() for hdr in self.request.response.headerlist]
try:
idx = hdr_names.index('LOCATION')
except ValueError:
self.request.response.headerlist.append(loc_hdr)
else:
# Replace existing location header.
# FIXME: It is not clear under which conditions this happens, so
# we do not have a test for it yet.
self.request.response.headerlist[idx] = loc_hdr | Adds a new or replaces an existing Location header to the response
headers pointing to the URL of the given resource. | Below is the instruction that describes the task:
### Input:
Adds a new or replaces an existing Location header to the response
headers pointing to the URL of the given resource.
### Response:
def _update_response_location_header(self, resource):
"""
Adds a new or replaces an existing Location header to the response
headers pointing to the URL of the given resource.
"""
location = resource_to_url(resource, request=self.request)
loc_hdr = ('Location', location)
hdr_names = [hdr[0].upper() for hdr in self.request.response.headerlist]
try:
idx = hdr_names.index('LOCATION')
except ValueError:
self.request.response.headerlist.append(loc_hdr)
else:
# Replace existing location header.
# FIXME: It is not clear under which conditions this happens, so
# we do not have a test for it yet.
self.request.response.headerlist[idx] = loc_hdr |
def make_room_alias(chain_id: ChainID, *suffixes: str) -> str:
"""Given a chain_id and any number of suffixes (global room names, pair of addresses),
compose and return the canonical room name for raiden network
network name from raiden_contracts.constants.ID_TO_NETWORKNAME is used for name, if available,
else numeric id
Params:
chain_id: numeric blockchain id for that room, as raiden rooms are per-chain specific
*suffixes: one or more suffixes for the name
Returns:
Qualified full room name. e.g.:
make_room_alias(3, 'discovery') == 'raiden_ropsten_discovery'
"""
network_name = ID_TO_NETWORKNAME.get(chain_id, str(chain_id))
return ROOM_NAME_SEPARATOR.join([ROOM_NAME_PREFIX, network_name, *suffixes]) | Given a chain_id and any number of suffixes (global room names, pair of addresses),
compose and return the canonical room name for raiden network
network name from raiden_contracts.constants.ID_TO_NETWORKNAME is used for name, if available,
else numeric id
Params:
chain_id: numeric blockchain id for that room, as raiden rooms are per-chain specific
*suffixes: one or more suffixes for the name
Returns:
Qualified full room name. e.g.:
make_room_alias(3, 'discovery') == 'raiden_ropsten_discovery' | Below is the instruction that describes the task:
### Input:
Given a chain_id and any number of suffixes (global room names, pair of addresses),
compose and return the canonical room name for raiden network
network name from raiden_contracts.constants.ID_TO_NETWORKNAME is used for name, if available,
else numeric id
Params:
chain_id: numeric blockchain id for that room, as raiden rooms are per-chain specific
*suffixes: one or more suffixes for the name
Returns:
Qualified full room name. e.g.:
make_room_alias(3, 'discovery') == 'raiden_ropsten_discovery'
### Response:
def make_room_alias(chain_id: ChainID, *suffixes: str) -> str:
"""Given a chain_id and any number of suffixes (global room names, pair of addresses),
compose and return the canonical room name for raiden network
network name from raiden_contracts.constants.ID_TO_NETWORKNAME is used for name, if available,
else numeric id
Params:
chain_id: numeric blockchain id for that room, as raiden rooms are per-chain specific
*suffixes: one or more suffixes for the name
Returns:
Qualified full room name. e.g.:
make_room_alias(3, 'discovery') == 'raiden_ropsten_discovery'
"""
network_name = ID_TO_NETWORKNAME.get(chain_id, str(chain_id))
return ROOM_NAME_SEPARATOR.join([ROOM_NAME_PREFIX, network_name, *suffixes]) |
def request(self, method, params=None):
"""Send a JSON RPC request to the client.
Args:
method (str): The method name of the message to send
params (any): The payload of the message
Returns:
Future that will resolve once a response has been received
"""
msg_id = self._id_generator()
log.debug('Sending request with id %s: %s %s', msg_id, method, params)
message = {
'jsonrpc': JSONRPC_VERSION,
'id': msg_id,
'method': method,
}
if params is not None:
message['params'] = params
request_future = futures.Future()
request_future.add_done_callback(self._cancel_callback(msg_id))
self._server_request_futures[msg_id] = request_future
self._consumer(message)
return request_future | Send a JSON RPC request to the client.
Args:
method (str): The method name of the message to send
params (any): The payload of the message
Returns:
Future that will resolve once a response has been received | Below is the instruction that describes the task:
### Input:
Send a JSON RPC request to the client.
Args:
method (str): The method name of the message to send
params (any): The payload of the message
Returns:
Future that will resolve once a response has been received
### Response:
def request(self, method, params=None):
"""Send a JSON RPC request to the client.
Args:
method (str): The method name of the message to send
params (any): The payload of the message
Returns:
Future that will resolve once a response has been received
"""
msg_id = self._id_generator()
log.debug('Sending request with id %s: %s %s', msg_id, method, params)
message = {
'jsonrpc': JSONRPC_VERSION,
'id': msg_id,
'method': method,
}
if params is not None:
message['params'] = params
request_future = futures.Future()
request_future.add_done_callback(self._cancel_callback(msg_id))
self._server_request_futures[msg_id] = request_future
self._consumer(message)
return request_future |
def normalize_query_parameters(query_string):
"""
normalize_query_parameters(query_string) -> dict
Converts a query string into a dictionary mapping parameter names to a
    list of the sorted values. This ensures that the query string follows
% encoding rules according to RFC 3986 and checks for duplicate keys.
A ValueError exception is raised if a percent encoding is invalid.
"""
if query_string == "":
return {}
components = query_string.split("&")
result = {}
for component in components:
try:
key, value = component.split("=", 1)
except ValueError:
key = component
value = ""
if component == "":
# Empty component; skip it.
continue
key = normalize_uri_path_component(key)
value = normalize_uri_path_component(value)
if key in result:
result[key].append(value)
else:
result[key] = [value]
return dict([(key, sorted(values))
for key, values in iteritems(result)]) | normalize_query_parameters(query_string) -> dict
Converts a query string into a dictionary mapping parameter names to a
list of the sorted values. This ensures that the query string follows
% encoding rules according to RFC 3986 and checks for duplicate keys.
A ValueError exception is raised if a percent encoding is invalid. | Below is the instruction that describes the task:
### Input:
normalize_query_parameters(query_string) -> dict
Converts a query string into a dictionary mapping parameter names to a
list of the sorted values. This ensures that the query string follows
% encoding rules according to RFC 3986 and checks for duplicate keys.
A ValueError exception is raised if a percent encoding is invalid.
### Response:
def normalize_query_parameters(query_string):
"""
normalize_query_parameters(query_string) -> dict
Converts a query string into a dictionary mapping parameter names to a
    list of the sorted values. This ensures that the query string follows
% encoding rules according to RFC 3986 and checks for duplicate keys.
A ValueError exception is raised if a percent encoding is invalid.
"""
if query_string == "":
return {}
components = query_string.split("&")
result = {}
for component in components:
try:
key, value = component.split("=", 1)
except ValueError:
key = component
value = ""
if component == "":
# Empty component; skip it.
continue
key = normalize_uri_path_component(key)
value = normalize_uri_path_component(value)
if key in result:
result[key].append(value)
else:
result[key] = [value]
return dict([(key, sorted(values))
for key, values in iteritems(result)]) |
def find_urls(self, site, frametype, gpsstart, gpsend,
match=None, on_gaps='warn'):
"""Find all files of the given type in the [start, end) GPS interval.
"""
span = Segment(gpsstart, gpsend)
cache = [e for e in self._read_ffl_cache(site, frametype) if
e.observatory == site and e.description == frametype and
e.segment.intersects(span)]
urls = [e.path for e in cache]
missing = SegmentList([span]) - cache_segments(cache)
if match:
match = re.compile(match)
urls = list(filter(match.search, urls))
# no missing data or don't care, return
if on_gaps == 'ignore' or not missing:
return urls
# handle missing data
msg = 'Missing segments: \n{0}'.format('\n'.join(map(str, missing)))
if on_gaps == 'warn':
warnings.warn(msg)
return urls
raise RuntimeError(msg) | Find all files of the given type in the [start, end) GPS interval. | Below is the the instruction that describes the task:
### Input:
Find all files of the given type in the [start, end) GPS interval.
### Response:
def find_urls(self, site, frametype, gpsstart, gpsend,
match=None, on_gaps='warn'):
"""Find all files of the given type in the [start, end) GPS interval.
"""
span = Segment(gpsstart, gpsend)
cache = [e for e in self._read_ffl_cache(site, frametype) if
e.observatory == site and e.description == frametype and
e.segment.intersects(span)]
urls = [e.path for e in cache]
missing = SegmentList([span]) - cache_segments(cache)
if match:
match = re.compile(match)
urls = list(filter(match.search, urls))
# no missing data or don't care, return
if on_gaps == 'ignore' or not missing:
return urls
# handle missing data
msg = 'Missing segments: \n{0}'.format('\n'.join(map(str, missing)))
if on_gaps == 'warn':
warnings.warn(msg)
return urls
raise RuntimeError(msg) |
def enrich_backend(url, clean, backend_name, backend_params, cfg_section_name,
ocean_index=None,
ocean_index_enrich=None,
db_projects_map=None, json_projects_map=None,
db_sortinghat=None,
no_incremental=False, only_identities=False,
github_token=None, studies=False, only_studies=False,
url_enrich=None, events_enrich=False,
db_user=None, db_password=None, db_host=None,
do_refresh_projects=False, do_refresh_identities=False,
author_id=None, author_uuid=None, filter_raw=None,
filters_raw_prefix=None, jenkins_rename_file=None,
unaffiliated_group=None, pair_programming=False,
node_regex=False, studies_args=None, es_enrich_aliases=None,
last_enrich_date=None, projects_json_repo=None):
""" Enrich Ocean index """
backend = None
enrich_index = None
if ocean_index or ocean_index_enrich:
clean = False # don't remove index, it could be shared
if do_refresh_projects or do_refresh_identities:
clean = False # refresh works over the existing enriched items
if not get_connector_from_name(backend_name):
raise RuntimeError("Unknown backend %s" % backend_name)
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
try:
backend = None
backend_cmd = None
if klass:
# Data is retrieved from Perceval
backend_cmd = init_backend(klass(*backend_params))
backend = backend_cmd.backend
if ocean_index_enrich:
enrich_index = ocean_index_enrich
else:
if not ocean_index:
ocean_index = backend_name + "_" + backend.origin
enrich_index = ocean_index + "_enrich"
if events_enrich:
enrich_index += "_events"
enrich_backend = connector[2](db_sortinghat, db_projects_map, json_projects_map,
db_user, db_password, db_host)
enrich_backend.set_params(backend_params)
# store the cfg section name in the enrich backend to recover the corresponding project name in projects.json
enrich_backend.set_cfg_section_name(cfg_section_name)
enrich_backend.set_from_date(last_enrich_date)
if url_enrich:
elastic_enrich = get_elastic(url_enrich, enrich_index, clean, enrich_backend, es_enrich_aliases)
else:
elastic_enrich = get_elastic(url, enrich_index, clean, enrich_backend, es_enrich_aliases)
enrich_backend.set_elastic(elastic_enrich)
if github_token and backend_name == "git":
enrich_backend.set_github_token(github_token)
if jenkins_rename_file and backend_name == "jenkins":
enrich_backend.set_jenkins_rename_file(jenkins_rename_file)
if unaffiliated_group:
enrich_backend.unaffiliated_group = unaffiliated_group
if pair_programming:
enrich_backend.pair_programming = pair_programming
if node_regex:
enrich_backend.node_regex = node_regex
# The filter raw is needed to be able to assign the project value to an enriched item
# see line 544, grimoire_elk/enriched/enrich.py (fltr = eitem['origin'] + ' --filter-raw=' + self.filter_raw)
if filter_raw:
enrich_backend.set_filter_raw(filter_raw)
elif filters_raw_prefix:
enrich_backend.set_filter_raw_should(filters_raw_prefix)
enrich_backend.set_projects_json_repo(projects_json_repo)
ocean_backend = get_ocean_backend(backend_cmd, enrich_backend,
no_incremental, filter_raw,
filters_raw_prefix)
if only_studies:
logger.info("Running only studies (no SH and no enrichment)")
do_studies(ocean_backend, enrich_backend, studies_args)
elif do_refresh_projects:
logger.info("Refreshing project field in %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
field_id = enrich_backend.get_field_unique_id()
eitems = refresh_projects(enrich_backend)
enrich_backend.elastic.bulk_upload(eitems, field_id)
elif do_refresh_identities:
author_attr = None
author_values = None
if author_id:
author_attr = 'author_id'
author_values = [author_id]
elif author_uuid:
author_attr = 'author_uuid'
author_values = [author_uuid]
logger.info("Refreshing identities fields in %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
field_id = enrich_backend.get_field_unique_id()
eitems = refresh_identities(enrich_backend, author_attr, author_values)
enrich_backend.elastic.bulk_upload(eitems, field_id)
else:
clean = False # Don't remove ocean index when enrich
elastic_ocean = get_elastic(url, ocean_index, clean, ocean_backend)
ocean_backend.set_elastic(elastic_ocean)
logger.info("Adding enrichment data to %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
if db_sortinghat and enrich_backend.has_identities():
# FIXME: This step won't be done from enrich in the future
total_ids = load_identities(ocean_backend, enrich_backend)
logger.info("Total identities loaded %i ", total_ids)
if only_identities:
logger.info("Only SH identities added. Enrich not done!")
else:
# Enrichment for the new items once SH update is finished
if not events_enrich:
enrich_count = enrich_items(ocean_backend, enrich_backend)
if enrich_count is not None:
logger.info("Total items enriched %i ", enrich_count)
else:
enrich_count = enrich_items(ocean_backend, enrich_backend, events=True)
if enrich_count is not None:
logger.info("Total events enriched %i ", enrich_count)
if studies:
do_studies(ocean_backend, enrich_backend, studies_args)
except Exception as ex:
if backend:
logger.error("Error enriching ocean from %s (%s): %s",
backend_name, backend.origin, ex, exc_info=True)
else:
logger.error("Error enriching ocean %s", ex, exc_info=True)
logger.info("Done %s ", backend_name) | Enrich Ocean index | Below is the the instruction that describes the task:
### Input:
Enrich Ocean index
### Response:
def enrich_backend(url, clean, backend_name, backend_params, cfg_section_name,
ocean_index=None,
ocean_index_enrich=None,
db_projects_map=None, json_projects_map=None,
db_sortinghat=None,
no_incremental=False, only_identities=False,
github_token=None, studies=False, only_studies=False,
url_enrich=None, events_enrich=False,
db_user=None, db_password=None, db_host=None,
do_refresh_projects=False, do_refresh_identities=False,
author_id=None, author_uuid=None, filter_raw=None,
filters_raw_prefix=None, jenkins_rename_file=None,
unaffiliated_group=None, pair_programming=False,
node_regex=False, studies_args=None, es_enrich_aliases=None,
last_enrich_date=None, projects_json_repo=None):
""" Enrich Ocean index """
backend = None
enrich_index = None
if ocean_index or ocean_index_enrich:
clean = False # don't remove index, it could be shared
if do_refresh_projects or do_refresh_identities:
clean = False # refresh works over the existing enriched items
if not get_connector_from_name(backend_name):
raise RuntimeError("Unknown backend %s" % backend_name)
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
try:
backend = None
backend_cmd = None
if klass:
# Data is retrieved from Perceval
backend_cmd = init_backend(klass(*backend_params))
backend = backend_cmd.backend
if ocean_index_enrich:
enrich_index = ocean_index_enrich
else:
if not ocean_index:
ocean_index = backend_name + "_" + backend.origin
enrich_index = ocean_index + "_enrich"
if events_enrich:
enrich_index += "_events"
enrich_backend = connector[2](db_sortinghat, db_projects_map, json_projects_map,
db_user, db_password, db_host)
enrich_backend.set_params(backend_params)
# store the cfg section name in the enrich backend to recover the corresponding project name in projects.json
enrich_backend.set_cfg_section_name(cfg_section_name)
enrich_backend.set_from_date(last_enrich_date)
if url_enrich:
elastic_enrich = get_elastic(url_enrich, enrich_index, clean, enrich_backend, es_enrich_aliases)
else:
elastic_enrich = get_elastic(url, enrich_index, clean, enrich_backend, es_enrich_aliases)
enrich_backend.set_elastic(elastic_enrich)
if github_token and backend_name == "git":
enrich_backend.set_github_token(github_token)
if jenkins_rename_file and backend_name == "jenkins":
enrich_backend.set_jenkins_rename_file(jenkins_rename_file)
if unaffiliated_group:
enrich_backend.unaffiliated_group = unaffiliated_group
if pair_programming:
enrich_backend.pair_programming = pair_programming
if node_regex:
enrich_backend.node_regex = node_regex
# The filter raw is needed to be able to assign the project value to an enriched item
# see line 544, grimoire_elk/enriched/enrich.py (fltr = eitem['origin'] + ' --filter-raw=' + self.filter_raw)
if filter_raw:
enrich_backend.set_filter_raw(filter_raw)
elif filters_raw_prefix:
enrich_backend.set_filter_raw_should(filters_raw_prefix)
enrich_backend.set_projects_json_repo(projects_json_repo)
ocean_backend = get_ocean_backend(backend_cmd, enrich_backend,
no_incremental, filter_raw,
filters_raw_prefix)
if only_studies:
logger.info("Running only studies (no SH and no enrichment)")
do_studies(ocean_backend, enrich_backend, studies_args)
elif do_refresh_projects:
logger.info("Refreshing project field in %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
field_id = enrich_backend.get_field_unique_id()
eitems = refresh_projects(enrich_backend)
enrich_backend.elastic.bulk_upload(eitems, field_id)
elif do_refresh_identities:
author_attr = None
author_values = None
if author_id:
author_attr = 'author_id'
author_values = [author_id]
elif author_uuid:
author_attr = 'author_uuid'
author_values = [author_uuid]
logger.info("Refreshing identities fields in %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
field_id = enrich_backend.get_field_unique_id()
eitems = refresh_identities(enrich_backend, author_attr, author_values)
enrich_backend.elastic.bulk_upload(eitems, field_id)
else:
clean = False # Don't remove ocean index when enrich
elastic_ocean = get_elastic(url, ocean_index, clean, ocean_backend)
ocean_backend.set_elastic(elastic_ocean)
logger.info("Adding enrichment data to %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
if db_sortinghat and enrich_backend.has_identities():
# FIXME: This step won't be done from enrich in the future
total_ids = load_identities(ocean_backend, enrich_backend)
logger.info("Total identities loaded %i ", total_ids)
if only_identities:
logger.info("Only SH identities added. Enrich not done!")
else:
# Enrichment for the new items once SH update is finished
if not events_enrich:
enrich_count = enrich_items(ocean_backend, enrich_backend)
if enrich_count is not None:
logger.info("Total items enriched %i ", enrich_count)
else:
enrich_count = enrich_items(ocean_backend, enrich_backend, events=True)
if enrich_count is not None:
logger.info("Total events enriched %i ", enrich_count)
if studies:
do_studies(ocean_backend, enrich_backend, studies_args)
except Exception as ex:
if backend:
logger.error("Error enriching ocean from %s (%s): %s",
backend_name, backend.origin, ex, exc_info=True)
else:
logger.error("Error enriching ocean %s", ex, exc_info=True)
logger.info("Done %s ", backend_name) |
def transform(self, flip_x, flip_y, swap_xy):
"""Transform view of the image.
.. note::
Transforming the image is generally faster than rotating,
if rotating in 90 degree increments. Also see :meth:`rotate`.
Parameters
----------
flipx, flipy : bool
If `True`, flip the image in the X and Y axes, respectively
swapxy : bool
If `True`, swap the X and Y axes.
"""
self.logger.debug("flip_x=%s flip_y=%s swap_xy=%s" % (
flip_x, flip_y, swap_xy))
with self.suppress_redraw:
self.t_.set(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy) | Transform view of the image.
.. note::
Transforming the image is generally faster than rotating,
if rotating in 90 degree increments. Also see :meth:`rotate`.
Parameters
----------
flipx, flipy : bool
If `True`, flip the image in the X and Y axes, respectively
swapxy : bool
If `True`, swap the X and Y axes. | Below is the the instruction that describes the task:
### Input:
Transform view of the image.
.. note::
Transforming the image is generally faster than rotating,
if rotating in 90 degree increments. Also see :meth:`rotate`.
Parameters
----------
flipx, flipy : bool
If `True`, flip the image in the X and Y axes, respectively
swapxy : bool
If `True`, swap the X and Y axes.
### Response:
def transform(self, flip_x, flip_y, swap_xy):
"""Transform view of the image.
.. note::
Transforming the image is generally faster than rotating,
if rotating in 90 degree increments. Also see :meth:`rotate`.
Parameters
----------
flipx, flipy : bool
If `True`, flip the image in the X and Y axes, respectively
swapxy : bool
If `True`, swap the X and Y axes.
"""
self.logger.debug("flip_x=%s flip_y=%s swap_xy=%s" % (
flip_x, flip_y, swap_xy))
with self.suppress_redraw:
self.t_.set(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy) |
def create_vm(client, name, compute_resource, datastore, disksize, nics,
memory, num_cpus, guest_id, host=None):
"""Create a virtual machine using the specified values.
:param name: The name of the VM to create.
:type name: str
:param compute_resource: The name of a ComputeResource in which to \
create the VM.
:type compute_resource: str
:param datastore: The name of the datastore on which to create the VM.
:type datastore: str
:param disksize: The size of the disk, specified in KB, MB or GB. e.g. \
20971520KB, 20480MB, 20GB.
:type disksize: str
:param nics: The NICs to create, specified in a list of dict's which \
contain a "network_name" and "type" key. e.g. \
{"network_name": "VM Network", "type": "VirtualE1000"}
:type nics: list of dict's
:param memory: The amount of memory for the VM. Specified in KB, MB or \
GB. e.g. 2097152KB, 2048MB, 2GB.
:type memory: str
:param num_cpus: The number of CPUs the VM will have.
:type num_cpus: int
:param guest_id: The vSphere string of the VM guest you are creating. \
The list of VMs can be found at \
http://pubs.vmware.com/vsphere-50/index.jsp?topic=/com.vmware.wssdk.apiref.doc_50/right-pane.html
:type guest_id: str
:param host: The name of the host (default: None), if you want to \
provision the VM on a \ specific host.
:type host: str
"""
print("Creating VM %s" % name)
# If the host is not set, use the ComputeResource as the target
if host is None:
target = client.find_entity_view("ComputeResource",
filter={"name": compute_resource})
resource_pool = target.resourcePool
else:
target = client.find_entity_view("HostSystem", filter={"name": host})
resource_pool = target.parent.resourcePool
disksize_pattern = re.compile("^\d+[KMG]B")
if disksize_pattern.match(disksize) is None:
print("Disk size %s is invalid. Try \"12G\" or similar" % disksize)
sys.exit(1)
if disksize.endswith("GB"):
disksize_kb = int(disksize[:-2]) * 1024 * 1024
elif disksize.endswith("MB"):
disksize_kb = int(disksize[:-2]) * 1024
elif disksize.endswith("KB"):
disksize_kb = int(disksize[:-2])
else:
print("Disk size %s is invalid. Try \"12G\" or similar" % disksize)
memory_pattern = re.compile("^\d+[KMG]B")
if memory_pattern.match(memory) is None:
print("Memory size %s is invalid. Try \"12G\" or similar" % memory)
sys.exit(1)
if memory.endswith("GB"):
memory_mb = int(memory[:-2]) * 1024
elif memory.endswith("MB"):
memory_mb = int(memory[:-2])
elif memory.endswith("KB"):
memory_mb = int(memory[:-2]) / 1024
else:
print("Memory size %s is invalid. Try \"12G\" or similar" % memory)
# A list of devices to be assigned to the VM
vm_devices = []
# Create a disk controller
controller = create_controller(client, "VirtualLsiLogicController")
vm_devices.append(controller)
ds_to_use = None
for ds in target.datastore:
if ds.name == datastore:
ds_to_use = ds
break
if ds_to_use is None:
print("Could not find datastore on %s with name %s" %
(target.name, datastore))
sys.exit(1)
# Ensure the datastore is accessible and has enough space
if ds_to_use.summary.accessible is not True:
print("Datastore (%s) exists, but is not accessible" %
ds_to_use.summary.name)
sys.exit(1)
if ds_to_use.summary.freeSpace < disksize_kb * 1024:
print("Datastore (%s) exists, but does not have sufficient"
" free space." % ds_to_use.summary.name)
sys.exit(1)
disk = create_disk(client, datastore=ds_to_use, disksize_kb=disksize_kb)
vm_devices.append(disk)
for nic in nics:
nic_spec = create_nic(client, target, nic)
if nic_spec is None:
print("Could not create spec for NIC")
sys.exit(1)
# Append the nic spec to the vm_devices list
vm_devices.append(nic_spec)
vmfi = client.create("VirtualMachineFileInfo")
vmfi.vmPathName = "[%s]" % ds_to_use.summary.name
vm_config_spec = client.create("VirtualMachineConfigSpec")
vm_config_spec.name = name
vm_config_spec.memoryMB = memory_mb
vm_config_spec.files = vmfi
vm_config_spec.annotation = "Auto-provisioned by psphere"
vm_config_spec.numCPUs = num_cpus
vm_config_spec.guestId = guest_id
vm_config_spec.deviceChange = vm_devices
# Find the datacenter of the target
if target.__class__.__name__ == "HostSystem":
datacenter = target.parent.parent.parent
else:
datacenter = target.parent.parent
try:
task = datacenter.vmFolder.CreateVM_Task(config=vm_config_spec,
pool=resource_pool)
except VimFault as e:
print("Failed to create %s: " % e)
sys.exit()
while task.info.state in ["queued", "running"]:
time.sleep(5)
task.update()
print("Waiting 5 more seconds for VM creation")
if task.info.state == "success":
elapsed_time = task.info.completeTime - task.info.startTime
print("Successfully created new VM %s. Server took %s seconds." %
(name, elapsed_time.seconds))
elif task.info.state == "error":
print("ERROR: The task for creating the VM has finished with"
" an error. If an error was reported it will follow.")
try:
print("ERROR: %s" % task.info.error.localizedMessage)
except AttributeError:
print("ERROR: There is no error message available.")
else:
print("UNKNOWN: The task reports an unknown state %s" %
task.info.state) | Create a virtual machine using the specified values.
:param name: The name of the VM to create.
:type name: str
:param compute_resource: The name of a ComputeResource in which to \
create the VM.
:type compute_resource: str
:param datastore: The name of the datastore on which to create the VM.
:type datastore: str
:param disksize: The size of the disk, specified in KB, MB or GB. e.g. \
20971520KB, 20480MB, 20GB.
:type disksize: str
:param nics: The NICs to create, specified in a list of dict's which \
contain a "network_name" and "type" key. e.g. \
{"network_name": "VM Network", "type": "VirtualE1000"}
:type nics: list of dict's
:param memory: The amount of memory for the VM. Specified in KB, MB or \
GB. e.g. 2097152KB, 2048MB, 2GB.
:type memory: str
:param num_cpus: The number of CPUs the VM will have.
:type num_cpus: int
:param guest_id: The vSphere string of the VM guest you are creating. \
The list of VMs can be found at \
http://pubs.vmware.com/vsphere-50/index.jsp?topic=/com.vmware.wssdk.apiref.doc_50/right-pane.html
:type guest_id: str
:param host: The name of the host (default: None), if you want to \
provision the VM on a \ specific host.
:type host: str | Below is the the instruction that describes the task:
### Input:
Create a virtual machine using the specified values.
:param name: The name of the VM to create.
:type name: str
:param compute_resource: The name of a ComputeResource in which to \
create the VM.
:type compute_resource: str
:param datastore: The name of the datastore on which to create the VM.
:type datastore: str
:param disksize: The size of the disk, specified in KB, MB or GB. e.g. \
20971520KB, 20480MB, 20GB.
:type disksize: str
:param nics: The NICs to create, specified in a list of dict's which \
contain a "network_name" and "type" key. e.g. \
{"network_name": "VM Network", "type": "VirtualE1000"}
:type nics: list of dict's
:param memory: The amount of memory for the VM. Specified in KB, MB or \
GB. e.g. 2097152KB, 2048MB, 2GB.
:type memory: str
:param num_cpus: The number of CPUs the VM will have.
:type num_cpus: int
:param guest_id: The vSphere string of the VM guest you are creating. \
The list of VMs can be found at \
http://pubs.vmware.com/vsphere-50/index.jsp?topic=/com.vmware.wssdk.apiref.doc_50/right-pane.html
:type guest_id: str
:param host: The name of the host (default: None), if you want to \
provision the VM on a \ specific host.
:type host: str
### Response:
def create_vm(client, name, compute_resource, datastore, disksize, nics,
memory, num_cpus, guest_id, host=None):
"""Create a virtual machine using the specified values.
:param name: The name of the VM to create.
:type name: str
:param compute_resource: The name of a ComputeResource in which to \
create the VM.
:type compute_resource: str
:param datastore: The name of the datastore on which to create the VM.
:type datastore: str
:param disksize: The size of the disk, specified in KB, MB or GB. e.g. \
20971520KB, 20480MB, 20GB.
:type disksize: str
:param nics: The NICs to create, specified in a list of dict's which \
contain a "network_name" and "type" key. e.g. \
{"network_name": "VM Network", "type": "VirtualE1000"}
:type nics: list of dict's
:param memory: The amount of memory for the VM. Specified in KB, MB or \
GB. e.g. 2097152KB, 2048MB, 2GB.
:type memory: str
:param num_cpus: The number of CPUs the VM will have.
:type num_cpus: int
:param guest_id: The vSphere string of the VM guest you are creating. \
The list of VMs can be found at \
http://pubs.vmware.com/vsphere-50/index.jsp?topic=/com.vmware.wssdk.apiref.doc_50/right-pane.html
:type guest_id: str
:param host: The name of the host (default: None), if you want to \
provision the VM on a \ specific host.
:type host: str
"""
print("Creating VM %s" % name)
# If the host is not set, use the ComputeResource as the target
if host is None:
target = client.find_entity_view("ComputeResource",
filter={"name": compute_resource})
resource_pool = target.resourcePool
else:
target = client.find_entity_view("HostSystem", filter={"name": host})
resource_pool = target.parent.resourcePool
disksize_pattern = re.compile("^\d+[KMG]B")
if disksize_pattern.match(disksize) is None:
print("Disk size %s is invalid. Try \"12G\" or similar" % disksize)
sys.exit(1)
if disksize.endswith("GB"):
disksize_kb = int(disksize[:-2]) * 1024 * 1024
elif disksize.endswith("MB"):
disksize_kb = int(disksize[:-2]) * 1024
elif disksize.endswith("KB"):
disksize_kb = int(disksize[:-2])
else:
print("Disk size %s is invalid. Try \"12G\" or similar" % disksize)
memory_pattern = re.compile("^\d+[KMG]B")
if memory_pattern.match(memory) is None:
print("Memory size %s is invalid. Try \"12G\" or similar" % memory)
sys.exit(1)
if memory.endswith("GB"):
memory_mb = int(memory[:-2]) * 1024
elif memory.endswith("MB"):
memory_mb = int(memory[:-2])
elif memory.endswith("KB"):
memory_mb = int(memory[:-2]) / 1024
else:
print("Memory size %s is invalid. Try \"12G\" or similar" % memory)
# A list of devices to be assigned to the VM
vm_devices = []
# Create a disk controller
controller = create_controller(client, "VirtualLsiLogicController")
vm_devices.append(controller)
ds_to_use = None
for ds in target.datastore:
if ds.name == datastore:
ds_to_use = ds
break
if ds_to_use is None:
print("Could not find datastore on %s with name %s" %
(target.name, datastore))
sys.exit(1)
# Ensure the datastore is accessible and has enough space
if ds_to_use.summary.accessible is not True:
print("Datastore (%s) exists, but is not accessible" %
ds_to_use.summary.name)
sys.exit(1)
if ds_to_use.summary.freeSpace < disksize_kb * 1024:
print("Datastore (%s) exists, but does not have sufficient"
" free space." % ds_to_use.summary.name)
sys.exit(1)
disk = create_disk(client, datastore=ds_to_use, disksize_kb=disksize_kb)
vm_devices.append(disk)
for nic in nics:
nic_spec = create_nic(client, target, nic)
if nic_spec is None:
print("Could not create spec for NIC")
sys.exit(1)
# Append the nic spec to the vm_devices list
vm_devices.append(nic_spec)
vmfi = client.create("VirtualMachineFileInfo")
vmfi.vmPathName = "[%s]" % ds_to_use.summary.name
vm_config_spec = client.create("VirtualMachineConfigSpec")
vm_config_spec.name = name
vm_config_spec.memoryMB = memory_mb
vm_config_spec.files = vmfi
vm_config_spec.annotation = "Auto-provisioned by psphere"
vm_config_spec.numCPUs = num_cpus
vm_config_spec.guestId = guest_id
vm_config_spec.deviceChange = vm_devices
# Find the datacenter of the target
if target.__class__.__name__ == "HostSystem":
datacenter = target.parent.parent.parent
else:
datacenter = target.parent.parent
try:
task = datacenter.vmFolder.CreateVM_Task(config=vm_config_spec,
pool=resource_pool)
except VimFault as e:
print("Failed to create %s: " % e)
sys.exit()
while task.info.state in ["queued", "running"]:
time.sleep(5)
task.update()
print("Waiting 5 more seconds for VM creation")
if task.info.state == "success":
elapsed_time = task.info.completeTime - task.info.startTime
print("Successfully created new VM %s. Server took %s seconds." %
(name, elapsed_time.seconds))
elif task.info.state == "error":
print("ERROR: The task for creating the VM has finished with"
" an error. If an error was reported it will follow.")
try:
print("ERROR: %s" % task.info.error.localizedMessage)
except AttributeError:
print("ERROR: There is no error message available.")
else:
print("UNKNOWN: The task reports an unknown state %s" %
task.info.state) |
def visualize_learning_result(self, state_key):
'''
Visualize learning result.
'''
x, y = state_key
map_arr = copy.deepcopy(self.__map_arr)
goal_point_tuple = np.where(map_arr == self.__end_point_label)
goal_x, goal_y = goal_point_tuple
map_arr[y][x] = "@"
self.__map_arr_list.append(map_arr)
if goal_x == x and goal_y == y:
for i in range(10):
key = len(self.__map_arr_list) - (10 - i)
print("Number of searches: " + str(key))
print(self.__map_arr_list[key])
print("Total number of searches: " + str(self.t))
print(self.__map_arr_list[-1])
print("Goal !!") | Visualize learning result. | Below is the the instruction that describes the task:
### Input:
Visualize learning result.
### Response:
def visualize_learning_result(self, state_key):
'''
Visualize learning result.
'''
x, y = state_key
map_arr = copy.deepcopy(self.__map_arr)
goal_point_tuple = np.where(map_arr == self.__end_point_label)
goal_x, goal_y = goal_point_tuple
map_arr[y][x] = "@"
self.__map_arr_list.append(map_arr)
if goal_x == x and goal_y == y:
for i in range(10):
key = len(self.__map_arr_list) - (10 - i)
print("Number of searches: " + str(key))
print(self.__map_arr_list[key])
print("Total number of searches: " + str(self.t))
print(self.__map_arr_list[-1])
print("Goal !!") |
def _retrieve_value(self, entity, default=None):
"""Internal helper to retrieve the value for this Property from an entity.
This returns None if no value is set, or the default argument if
given. For a repeated Property this returns a list if a value is
set, otherwise None. No additional transformations are applied.
"""
return entity._values.get(self._name, default) | Internal helper to retrieve the value for this Property from an entity.
This returns None if no value is set, or the default argument if
given. For a repeated Property this returns a list if a value is
set, otherwise None. No additional transformations are applied. | Below is the the instruction that describes the task:
### Input:
Internal helper to retrieve the value for this Property from an entity.
This returns None if no value is set, or the default argument if
given. For a repeated Property this returns a list if a value is
set, otherwise None. No additional transformations are applied.
### Response:
def _retrieve_value(self, entity, default=None):
"""Internal helper to retrieve the value for this Property from an entity.
This returns None if no value is set, or the default argument if
given. For a repeated Property this returns a list if a value is
set, otherwise None. No additional transformations are applied.
"""
return entity._values.get(self._name, default) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.