code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def get_open_port() -> int:
    """
    Get a PORT that will (probably) be available on the machine.

    It is possible that between the time the open PORT is found and the time
    it is used, another process may bind to it instead.

    :return: the (probably) available PORT
    """
    # The context manager guarantees the probe socket is closed even if
    # bind()/listen() raises, so no file descriptor is leaked on error.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as free_socket:
        # Port 0 asks the OS to pick any free ephemeral port.
        free_socket.bind(("", 0))
        free_socket.listen(1)
        return free_socket.getsockname()[1]
|
Gets a PORT that will (probably) be available on the machine.
It is possible that in the time between when the open PORT is found and when it is used, another process may
bind to it instead.
:return: the (probably) available PORT
|
def set_functions(self, f='a*x*cos(b*x)+c', p='a=-0.2, b, c=3', c=None, bg=None, **kwargs):
    """
    Sets the function(s) used to describe the data.

    Parameters
    ----------
    f=['a*x*cos(b*x)+c', 'a*x+c']
        This can be a string function, a defined function
        my_function(x,a,b), or a list of some combination
        of these two types of objects. The length of such
        a list must be equal to the number of data sets
        supplied to the fit routine.
    p='a=1.5, b'
        This must be a comma-separated string list of
        parameters used to fit. If an initial guess value is
        not specified, 1.0 will be used.
        If a function object is supplied, it is assumed that
        this string lists the parameter names in order.
    c=None
        Fit constants; like p, but won't be allowed to float
        during the fit. This can also be None.
    bg=None
        Can be functions in the same format as f describing a
        background (which can be subtracted during fits, etc)

    Additional keyword arguments are added to the globals used when
    evaluating the functions.

    Returns self so that calls can be chained.
    """
    # Reset all parameter / constant bookkeeping from any previous call.
    self._pnames = []
    self._cnames = []
    self._pguess = []
    self._constants = []
    # Update the globals available when the string functions are evaluated.
    self._globals.update(kwargs)
    # Store the raw inputs for later (e.g. when regenerating the functions).
    self._f_raw = f
    self._bg_raw = bg
    # Break up the constant names and initial values.
    if c:
        for s in c.split(','):
            # split by '=' and see if there is an initial value
            s = s.split('=')
            # add the name to the list
            self._cnames.append(s[0].strip())
            # if there is a guess value, add this (or 1.0)
            if len(s) > 1: self._constants.append(float(s[1]))
            else: self._constants.append(1.0)
    # Break up the parameter names and initial values (same format as c).
    for s in p.split(','):
        # split by '=' and see if there is an initial value
        s = s.split('=')
        # add the name to the list
        self._pnames.append(s[0].strip())
        # if there is a guess value, add this (or 1.0)
        if len(s) > 1: self._pguess.append(float(s[1]))
        else: self._pguess.append(1.0)
    # Use the internal settings we just set to create the functions.
    self._update_functions()
    # NOTE(review): assumes self['autoplot'] is a valid settings lookup on
    # this object -- confirm against the class's __getitem__.
    if self['autoplot']: self.plot()
    return self
|
Sets the function(s) used to describe the data.
Parameters
----------
f=['a*x*cos(b*x)+c', 'a*x+c']
This can be a string function, a defined function
my_function(x,a,b), or a list of some combination
of these two types of objects. The length of such
a list must be equal to the number of data sets
supplied to the fit routine.
p='a=1.5, b'
This must be a comma-separated string list of
parameters used to fit. If an initial guess value is
not specified, 1.0 will be used.
If a function object is supplied, it is assumed that
this string lists the parameter names in order.
c=None
Fit _constants; like p, but won't be allowed to float
during the fit. This can also be None.
bg=None
Can be functions in the same format as f describing a
background (which can be subtracted during fits, etc)
Additional keyword arguments are added to the globals used when
evaluating the functions.
|
def _request(self, method, path, data=None, reestablish_session=True):
    """Perform an HTTP request against the REST API.

    :param method: HTTP verb, e.g. "GET" or "POST".
    :param path: Either a full URL (anything starting with "http") or a
        path expanded via ``self._format_path()``.
    :param data: JSON-serializable request body.
    :param reestablish_session: If True, re-authenticate and retry once
        when the server answers 401.
    :returns: Decoded JSON content wrapped in ResponseList/ResponseDict,
        with the response headers attached as ``.headers``.
    :raises PureError: on transport-level failures (e.g. DNS) or when the
        response body is not JSON.
    :raises PureHTTPError: on any other unexpected HTTP status.
    """
    if path.startswith("http"):
        url = path  # For cases where URL of different form is needed.
    else:
        url = self._format_path(path)
    headers = {"Content-Type": "application/json"}
    if self._user_agent:
        headers['User-Agent'] = self._user_agent
    body = json.dumps(data).encode("utf-8")
    try:
        response = requests.request(method, url, data=body, headers=headers,
                                    cookies=self._cookies, **self._request_kwargs)
    except requests.exceptions.RequestException as err:
        # Error outside the scope of HTTP status codes,
        # e.g. unable to resolve domain name.
        # BUGFIX: RequestException has no `.message` attribute on Python 3
        # (it raised AttributeError here); use str(err) instead.
        raise PureError(str(err))
    if response.status_code == 200:
        if "application/json" in response.headers.get("Content-Type", ""):
            # Keep our cookie jar in sync with the server's session state.
            if response.cookies:
                self._cookies.update(response.cookies)
            else:
                self._cookies.clear()
            content = response.json()
            if isinstance(content, list):
                content = ResponseList(content)
            elif isinstance(content, dict):
                content = ResponseDict(content)
            content.headers = response.headers
            return content
        raise PureError("Response not in JSON: " + response.text)
    elif response.status_code == 401 and reestablish_session:
        # Session expired: log in again and retry exactly once.
        self._start_session()
        return self._request(method, path, data, False)
    elif response.status_code == 450 and self._renegotiate_rest_version:
        # Purity REST API version is incompatible; renegotiate and retry.
        old_version = self._rest_version
        self._rest_version = self._choose_rest_version()
        if old_version == self._rest_version:
            # Got 450 error, but the rest version was supported.
            # Something really unexpected happened.
            raise PureHTTPError(self._target, str(self._rest_version), response)
        return self._request(method, path, data, reestablish_session)
    else:
        raise PureHTTPError(self._target, str(self._rest_version), response)
|
Perform HTTP request for REST API.
|
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The buffer must contain the first 6 bytes of a ULID, which represent a
    timestamp in milliseconds. The 48 bits are emitted as ten Base32
    characters of 5 bits each; the first character carries only the top
    3 bits.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
    bytes specifically and is not meant for arbitrary encoding.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    length = len(timestamp)
    if length != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length))
    # Interpret the 6 bytes as a single big-endian 48-bit integer and peel
    # off successive 5-bit groups from the most significant end. Shifts run
    # 45, 40, ..., 0; the first group only ever holds 3 significant bits.
    as_int = int.from_bytes(bytes(timestamp), byteorder='big')
    return ''.join(ENCODING[(as_int >> shift) & 0x1F]
                   for shift in range(45, -1, -5))
|
Encode the given buffer to a :class:`~str` using Base32 encoding.
The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
are a timestamp in milliseconds.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param timestamp: Bytes to encode
:type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the timestamp is not 6 bytes
|
def expectScreen(self, filename, maxrms=0):
    """Block until the whole display matches a target image.

    filename: an image file to read and compare against
    maxrms: the maximum root mean square between histograms of the
            screen and target image
    """
    log.debug('expectScreen %s', filename)
    # Compare the entire framebuffer, i.e. starting at the origin.
    x_offset = y_offset = 0
    return self._expectFramebuffer(filename, x_offset, y_offset, maxrms)
|
Wait until the display matches a target image
filename: an image file to read and compare against
maxrms: the maximum root mean square between histograms of the
screen and target image
|
def setSubTitle(self, title):
    """
    Set the sub-title text for this page and refresh the layout.

    :param title | <str>
    """
    label = self._subTitleLabel
    label.setText(title)
    label.adjustSize()
    # Margins depend on the label's new size.
    self.adjustMargins()
|
Sets the sub-title for this page to the inputed title.
:param title | <str>
|
def send(self, message_id, args=None, kwargs=None):
    """
    Send a message to this state machine.

    To send a message to a state machine by its name, use
    `stmpy.Driver.send` instead.

    :param message_id: identifier of the message/event to dispatch.
    :param args: optional positional arguments carried with the event.
    :param kwargs: optional keyword arguments carried with the event.
    """
    # BUGFIX: the previous defaults (args=[], kwargs={}) were mutable
    # objects shared across all calls; use None sentinels instead.
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    self._logger.debug('Send {} in stm {}'.format(message_id, self.id))
    self._driver._add_event(
        event_id=message_id, args=args, kwargs=kwargs, stm=self)
|
Send a message to this state machine.
To send a message to a state machine by its name, use
`stmpy.Driver.send` instead.
|
def is_feasible(self, solution):
    """Return True if the given solution's derivatives may have potential
    valid, complete solutions.

    Checks every constraint registered for each newly-bound variable of
    *solution*; a single failing constraint makes the solution infeasible.
    """
    for new_var in solution.new.keys():
        for constraint in self._constraints_for_var.get(new_var, []):
            arguments = constraint.extract_values(solution, use_defaults=True)
            if not constraint(*arguments):
                return False
    return True
|
Returns True if the given solution's derivatives may have potential
valid, complete solutions.
|
def resize(image, target_size, **kwargs):
    """Resize an ndarray image of rank 3 or 4.

    target_size can be a tuple `(width, height)` or scalar `width`.
    Extra keyword arguments are forwarded to ``ndimage.zoom``.
    """
    if isinstance(target_size, int):
        target_size = (target_size, target_size)
    if not isinstance(target_size, (list, tuple, np.ndarray)):
        raise ValueError(
            "`target_size` should be a single number (width) or a list"
            "/tuple/ndarray (width, height), not {}.".format(type(target_size))
        )
    rank = len(image.shape)
    assert 3 <= rank <= 4
    original_size = image.shape[-3:-1]
    # ndimage.zoom doesn't short-circuit a no-op itself, so do it here.
    if original_size == target_size:
        return image
    # TODO: maybe allow -1 in target_size to signify aspect-ratio preserving resize?
    zoom_factors = [1] * rank
    zoom_factors[-3:-1] = [wanted / actual
                           for wanted, actual in zip(target_size, original_size)]
    zoomed = ndimage.zoom(image, zoom_factors, **kwargs)
    # zoom can overshoot by a pixel due to rounding; crop to the exact size.
    return zoomed[..., : target_size[0], : target_size[1], :]
|
Resize an ndarray image of rank 3 or 4.
target_size can be a tuple `(width, height)` or scalar `width`.
|
def dhcp_options_exists(dhcp_options_id=None, name=None, dhcp_options_name=None,
                        tags=None, region=None, key=None, keyid=None, profile=None):
    '''
    Check whether a set of DHCP options exists.

    Returns True if the dhcp option exists; Returns False otherwise.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_vpc.dhcp_options_exists dhcp_options_id='dhcp-a0bl34pp'
    '''
    # `name` is retained only for backwards compatibility.
    if name:
        log.warning('boto_vpc.dhcp_options_exists: name parameter is deprecated '
                    'use dhcp_options_name instead.')
        dhcp_options_name = name
    return resource_exists('dhcp_options',
                           name=dhcp_options_name,
                           resource_id=dhcp_options_id,
                           tags=tags,
                           region=region,
                           key=key,
                           keyid=keyid,
                           profile=profile)
|
Check if a dhcp option exists.
Returns True if the dhcp option exists; Returns False otherwise.
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.dhcp_options_exists dhcp_options_id='dhcp-a0bl34pp'
|
def preprocess_dict(d):
    '''
    Preprocess a dict so its values can be used as environment variables.

    :param d: dict to be processed
    :raises KeyError: when a value's type has no registered preprocessor
    '''
    processed = {}
    for key, value in d.items():
        value_type = type(value)
        if value_type not in PREPROCESSORS:
            raise KeyError('Invalid type in dict: {}'.format(value_type))
        processed[key] = PREPROCESSORS[value_type](value)
    return processed
|
Preprocess a dict to be used as environment variables.
:param d: dict to be processed
|
def signalflow(self, token, endpoint=None, timeout=None, compress=None):
    """Obtain a SignalFlow API client."""
    from . import signalflow
    # Fall back to instance-level defaults for anything not supplied.
    if compress is None:
        compress = self._compress
    return signalflow.SignalFlowClient(
        token=token,
        endpoint=endpoint or self._stream_endpoint,
        timeout=timeout or self._timeout,
        compress=compress)
|
Obtain a SignalFlow API client.
|
def _ast_option_group_to_code(self, option_group, **kwargs):
"""Convert an AST option group to python source code."""
lines = ["option("]
lines.extend(self._indent(self._ast_to_code(option_group.expression)))
lines.append(")")
return lines
|
Convert an AST option group to python source code.
|
def resizeToContents(self):
    """
    Resizes the list widget to fit its contents vertically.
    """
    item_count = self.count()
    if not item_count:
        # Empty list: collapse down to the configured minimum.
        self.setFixedHeight(self.minimumHeight())
        return
    last_item = self.item(item_count - 1)
    bottom = self.visualItemRect(last_item).bottom()
    # 8px breathing room below the last item, never shorter than 28px.
    self.setFixedHeight(max(28, bottom + 8))
|
Resizes the list widget to fit its contents vertically.
|
def contigs_to_positions(contigs, binning=10000):
    """Build positions from contig labels.

    From a list of contig labels and a binning parameter, build a list of
    positions that's essentially a concatenation of linspaces with step
    equal to the binning: within each run of identical labels, positions
    restart from 0 and advance by `binning`.

    Parameters
    ----------
    contigs : list or array_like
        The list of contig labels, must be sorted (identical labels
        adjacent, as required by itertools.groupby).
    binning : int, optional
        The step for the list of positions. Default is 10000.

    Returns
    -------
    positions : numpy.ndarray
        The piece-wise sorted list of positions.
    """
    # BUGFIXES vs. the original:
    #   * itertools.groubpy  -> itertools.groupby (AttributeError)
    #   * len(chunk)         -> groupby chunks are iterators, no len()
    #   * np.arange(list(chunk)) -> np.arange(size)
    #   * np.zeros_like(contigs) inherited the label dtype (e.g. str),
    #     which cannot store integer positions -> allocate int64 instead.
    positions = np.zeros(len(contigs), dtype=np.int64)
    index = 0
    for _, chunk in itertools.groupby(contigs):
        size = sum(1 for _ in chunk)
        positions[index:index + size] = np.arange(size) * binning
        index += size
    return positions
|
Build positions from contig labels
From a list of contig labels and a binning parameter,
build a list of positions that's essentially a
concatenation of linspaces with step equal to the
binning.
Parameters
----------
contigs : list or array_like
The list of contig labels, must be sorted.
binning : int, optional
The step for the list of positions. Default is 10000.
Returns
-------
positions : numpy.ndarray
The piece-wise sorted list of positions
|
def logical_chassis_fwdl_sanity_input_cluster_options_auto_activate_auto_activate(self, **kwargs):
    """Auto Generated Code

    Builds the XML config tree
    logical_chassis_fwdl_sanity/input/cluster-options/auto-activate/auto-activate
    and passes it to the callback (kwargs['callback'] or self._callback).
    """
    # BUGFIX: removed the dead store `config = ET.Element("config")`,
    # which was immediately overwritten and never used.
    logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
    config = logical_chassis_fwdl_sanity
    input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
    cluster_options = ET.SubElement(input, "cluster-options")
    auto_activate = ET.SubElement(cluster_options, "auto-activate")
    auto_activate = ET.SubElement(auto_activate, "auto-activate")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def _sample_orthonormal_to(mu):
"""Sample point on sphere orthogonal to mu."""
v = np.random.randn(mu.shape[0])
proj_mu_v = mu * np.dot(mu, v) / np.linalg.norm(mu)
orthto = v - proj_mu_v
return orthto / np.linalg.norm(orthto)
|
Sample point on sphere orthogonal to mu.
|
def getVersionFromArchiveId(git_archive_id='$Format:%ct %d$'):
    """Extract a version when the source tree came from `git archive`.

    When source is exported via `git archive`, the git_archive_id init value
    is modified and placeholders are expanded to the "archived" revision:

        %ct: committer date, UNIX timestamp
        %d:  ref names, like the --decorate option of git-log

    See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.

    :returns: a PEP 440 version from the tag if the revision is tagged, a
        'YYYY.MM.DD' string from the commit date otherwise, or None when the
        source was not produced by `git archive`.
    """
    # mangle the magic string to make sure it is not replaced by git archive
    if not git_archive_id.startswith('$For''mat:'):
        # source was modified by git archive, try to parse the version from
        # the value of git_archive_id
        match = re.search(r'tag:\s*v([^,)]+)', git_archive_id)
        if match:
            # archived revision is tagged, use the tag
            return gitDescribeToPep440(match.group(1))
        # archived revision is not tagged, use the commit date
        tstamp = git_archive_id.strip().split()[0]
        # BUGFIX/modernization: datetime.utcfromtimestamp() is deprecated
        # (since 3.12); an aware-UTC datetime produces the same string.
        d = datetime.datetime.fromtimestamp(int(tstamp), tz=datetime.timezone.utc)
        return d.strftime('%Y.%m.%d')
    return None
|
Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
|
def normalize(text, mode='NFKC', ignore=''):
    """Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana,
    Full-width (Zenkaku) ASCII and DIGIT to Half-width (Hankaku) ASCII
    and DIGIT.
    Additionally, Full-width wave dash (〜) etc. are normalized

    Parameters
    ----------
    text : str
        Source string.
    mode : str
        Unicode normalization mode.
    ignore : str
        Characters to be ignored in converting.
        NOTE(review): `ignore` is not consulted anywhere in this body --
        confirm whether it is honored elsewhere or is dead here.

    Return
    ------
    str
        Normalized string.

    Examples
    --------
    >>> print(jaconv.normalize('ティロ・フィナ〜レ', 'NFKC'))
    ティロ・フィナーレ
    """
    # Wave dashes become the katakana long vowel mark.
    text = text.replace('〜', 'ー').replace('~', 'ー')
    # Curly quotes become their ASCII/backtick equivalents.
    text = text.replace("’", "'").replace('”', '"').replace('“', '``')
    # Assorted dash/hyphen codepoints collapse to ASCII hyphen-minus.
    text = text.replace('―', '-').replace('‐', '-').replace('˗', '-').replace('֊', '-')
    text = text.replace('‐', '-').replace('‑', '-').replace('‒', '-').replace('–', '-')
    text = text.replace('⁃', '-').replace('⁻', '-').replace('₋', '-').replace('−', '-')
    # Full-width/long dashes collapse to the long vowel mark instead.
    # NOTE(review): some replacements appear to repeat earlier ones; the
    # source codepoints are visually similar -- confirm intended codepoints.
    text = text.replace('﹣', 'ー').replace('-', 'ー').replace('—', 'ー').replace('―', 'ー')
    text = text.replace('━', 'ー').replace('─', 'ー')
    # Final pass: standard Unicode normalization (NFKC by default).
    return unicodedata.normalize(mode, text)
|
Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana,
Full-width (Zenkaku) ASCII and DIGIT to Half-width (Hankaku) ASCII
and DIGIT.
Additionally, Full-width wave dash (〜) etc. are normalized
Parameters
----------
text : str
Source string.
mode : str
Unicode normalization mode.
ignore : str
Characters to be ignored in converting.
Return
------
str
Normalized string.
Examples
--------
>>> print(jaconv.normalize('ティロ・フィナ〜レ', 'NFKC'))
ティロ・フィナーレ
|
def list_cubes(self):
    """Yield the base name of every JSON file in the configured directory."""
    for entry in os.listdir(self.directory):
        if '.' not in entry:
            continue
        base, extension = entry.rsplit('.', 1)
        # Extension match is case-insensitive (.json, .JSON, ...).
        if extension.lower() == 'json':
            yield base
|
List all available JSON files.
|
def _sm_to_pain(self, *args, **kwargs):
    """
    State-machine transition: start the blockade event and arm a timer
    that will end it after a random duration.
    """
    _logger.info("Starting chaos for blockade %s" % self._blockade_name)
    self._do_blockade_event()
    # Arm the timer that ends the pain; duration is random within the
    # configured [min, max] window, in milliseconds.
    duration_ms = random.randint(self._run_min_time, self._run_max_time)
    self._timer = threading.Timer(duration_ms / 1000.0, self.event_timeout)
    self._timer.start()
|
Start the blockade event
|
def field_exists(self, well_x, well_y, field_x, field_y):
    """Return True if the field exists in the ScanFieldArray."""
    # Idiom fix: identity comparison with None (`is not`), not `!=`.
    return self.field(well_x, well_y, field_x, field_y) is not None
|
Check if field exists ScanFieldArray.
|
def _get_underlying_data(self, instance):
"""Return data from raw data store, rather than overridden
__get__ methods. Should NOT be overwritten.
"""
self._touch(instance)
return self.data.get(instance, None)
|
Return data from raw data store, rather than overridden
__get__ methods. Should NOT be overwritten.
|
def ssh(cmd=''):
    '''
    SSH into the server(s) (sequentially if more than one)

    Args:
        cmd (str) ='': Command to run on the server
    '''
    ssh_command = 'ssh -A -o StrictHostKeyChecking=no -i "%s" %s@%s "%s"' % (
        env.key_filename, env.user, env.host, cmd)
    # warn_only so a failing remote command doesn't abort the whole task
    with settings(warn_only=True):
        local(ssh_command)
|
SSH into the server(s) (sequentially if more than one)
Args:
cmd (str) ='': Command to run on the server
|
def string(value,
           allow_empty = False,
           coerce_value = False,
           minimum_length = None,
           maximum_length = None,
           whitespace_padding = False,
           **kwargs):
    """Validate that ``value`` is a valid string.

    :param value: The value to validate.
    :type value: :class:`str <python:str>` / :obj:`None <python:None>`
    :param allow_empty: If ``True``, returns :obj:`None <python:None>` when
      ``value`` is empty; if ``False``, raises
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`.
      Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`
    :param coerce_value: If ``True``, coerces ``value`` to a string when it is
      not one already; if ``False``, a non-string raises instead. Defaults to
      ``False``.
    :type coerce_value: :class:`bool <python:bool>`
    :param minimum_length: Minimum number of characters required, if supplied.
    :type minimum_length: :class:`int <python:int>`
    :param maximum_length: Maximum number of characters allowed, if supplied.
    :type maximum_length: :class:`int <python:int>`
    :param whitespace_padding: If ``True`` and the value is shorter than
      ``minimum_length``, pad it with spaces instead of raising. Defaults to
      ``False``.
    :type whitespace_padding: :class:`bool <python:bool>`
    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: :class:`str <python:str>` / :obj:`None <python:None>`
    :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
    :raises CannotCoerceError: if ``value`` is not a string and ``coerce_value``
      is ``False``
    :raises MinimumLengthError: if the value is shorter than ``minimum_length``
      and ``whitespace_padding`` is ``False``
    :raises MaximumLengthError: if the value is longer than ``maximum_length``
    """
    # Empty-value guard first: reject or short-circuit to None.
    if not value:
        if not allow_empty:
            raise errors.EmptyValueError('value (%s) was empty' % value)
        return None
    # Normalize the length bounds (None stays None).
    minimum_length = integer(minimum_length, allow_empty = True)
    maximum_length = integer(maximum_length, allow_empty = True)
    # Type check / coercion.
    if coerce_value:
        value = str(value)
    elif not isinstance(value, basestring):
        raise errors.CannotCoerceError('value (%s) was not coerced to a string' % value)
    # Length checks; padding only applies to the minimum bound.
    if value and maximum_length and len(value) > maximum_length:
        raise errors.MaximumLengthError(
            'value (%s) exceeds maximum length %s' % (value, maximum_length)
        )
    if value and minimum_length and len(value) < minimum_length:
        if whitespace_padding:
            value = value.ljust(minimum_length, ' ')
        else:
            raise errors.MinimumLengthError(
                'value (%s) is below the minimum length %s' % (value, minimum_length)
            )
    return value
|
Validate that ``value`` is a valid string.
:param value: The value to validate.
:type value: :class:`str <python:str>` / :obj:`None <python:None>`
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value``
is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if
``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param coerce_value: If ``True``, will attempt to coerce ``value`` to a string if
it is not already. If ``False``, will raise a :class:`ValueError` if ``value``
is not a string. Defaults to ``False``.
:type coerce_value: :class:`bool <python:bool>`
:param minimum_length: If supplied, indicates the minimum number of characters
needed to be valid.
:type minimum_length: :class:`int <python:int>`
:param maximum_length: If supplied, indicates the minimum number of characters
needed to be valid.
:type maximum_length: :class:`int <python:int>`
:param whitespace_padding: If ``True`` and the value is below the
``minimum_length``, pad the value with spaces. Defaults to ``False``.
:type whitespace_padding: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`str <python:str>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` is not a valid string and ``coerce_value``
is ``False``
:raises MinimumLengthError: if ``minimum_length`` is supplied and the length of
``value`` is less than ``minimum_length`` and ``whitespace_padding`` is
``False``
:raises MaximumLengthError: if ``maximum_length`` is supplied and the length of
``value`` is more than the ``maximum_length``
|
def add(ctx, alias, mapping, backend):
    """
    Add a new alias to your configuration file.
    """
    # With multiple configured backends the target is ambiguous, so the
    # user must disambiguate via --backend.
    if not backend:
        backends_list = ctx.obj['settings'].get_backends()
        if len(backends_list) > 1:
            choices = ", ".join(dict(backends_list).keys())
            raise click.UsageError(
                "You're using more than 1 backend. Please set the backend to "
                "add the alias to with the --backend option (choices are %s)" %
                choices
            )
    add_mapping(ctx, alias, mapping, backend)
|
Add a new alias to your configuration file.
|
def login(self, verify_code=''):
    """
    Log in to the WeChat Official Account admin platform (mp.weixin.qq.com).

    Note: when ``WechatExt`` is instantiated without ``token`` and
    ``cookies``, this method is invoked automatically; call it manually only
    to retry after catching a ``NeedLoginError``.

    :param verify_code: CAPTCHA verification code; pass an empty string when
        no CAPTCHA is required.
    :raises LoginVerifyCodeError: a CAPTCHA is required or the supplied code
        was wrong (subclass of ``LoginError``).
    :raises LoginError: any other login failure; the exception message is
        the raw server response and can be logged for diagnosis.
    """
    url = 'https://mp.weixin.qq.com/cgi-bin/login'
    payload = {
        'username': self.__username,
        'pwd': self.__password,
        'imgcode': verify_code,
        'f': 'json',
    }
    headers = {
        'x-requested-with': 'XMLHttpRequest',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/loginpage?t=wxm2-login&lang=zh_CN',
        'Cookie': self.__cookies,
    }
    r = requests.post(url, data=payload, headers=headers)
    # A token in the response body means the login succeeded.
    s = re.search(r'token=(\d+)', r.text)
    if not s:
        try:
            error_code = json.loads(r.text)['base_resp']['ret']
        except (KeyError, ValueError):
            raise LoginError(r.text)
        # NOTE(review): -8 / -27 are treated as CAPTCHA-related return
        # codes -- observed protocol behavior, not documented by WeChat.
        if error_code in [-8, -27]:
            raise LoginVerifyCodeError(r.text)
        elif re.search(r'readtemplate', r.text):
            raise LoginError('You need to turn off the safety protection of wechat.')
        else:
            raise LoginError(r.text)
    self.__token = int(s.group(1))
    # Rebuild the cookie string from the freshly issued session cookies.
    self.__cookies = ''
    for cookie in r.cookies:
        self.__cookies += cookie.name + '=' + cookie.value + ';'
|
登录微信公众平台
注意在实例化 ``WechatExt`` 的时候,如果没有传入 ``token`` 及 ``cookies`` ,将会自动调用该方法,无需手动调用
当且仅当捕获到 ``NeedLoginError`` 异常时才需要调用此方法进行登录重试
:param verify_code: 验证码, 不传入则为无验证码
:raises LoginVerifyCodeError: 需要验证码或验证码出错,该异常为 ``LoginError`` 的子类
:raises LoginError: 登录出错异常,异常内容为微信服务器响应的内容,可作为日志记录下来
|
def fit(self, X, y=None, **kwargs):
    """
    The fit method is the primary drawing input for the dispersion
    visualization.

    Parameters
    ----------
    X : list or generator
        Should be provided as a list of documents or a generator
        that yields a list of documents that contain a list of
        words in the order they appear in the document.
    y : ndarray or Series of length n
        An optional array or series of target or class values for
        instances. If this is specified, then the points will be colored
        according to their class.
    kwargs : dict
        Pass generic arguments to the drawing method

    Returns
    -------
    self : instance
        Returns the instance of the transformer/visualizer
    """
    # Resolve the class labels: explicit y wins, then configured labels,
    # then a single sentinel "no class" label.
    if y is not None:
        self.classes_ = np.unique(y)
    elif y is None and self.labels is not None:
        self.classes_ = np.array([self.labels[0]])
    else:
        self.classes_ = np.array([self.NULL_CLASS])
    # Create an index (e.g. the y position) for the target words
    # (reversed so the first target word appears at the top of the plot --
    # NOTE(review): inferred from the flip; confirm against draw()).
    self.indexed_words_ = np.flip(self.target_words, axis=0)
    if self.ignore_case:
        self.indexed_words_ = np.array([w.lower() for w in self.indexed_words_])
    # Stack is used to create a 2D array from the generator
    try:
        points_target = np.stack(self._compute_dispersion(X, y))
    except ValueError:
        # np.stack raises ValueError on an empty sequence, i.e. no matches.
        raise YellowbrickValueError((
            "No indexed words were found in the corpus"
        ))
    # Column 0/1 are the (x, y) coordinates; column 2 carries the target.
    points = np.stack(zip(points_target[:,0].astype(int),
                          points_target[:,1].astype(int)))
    self.target = points_target[:,2]
    self._check_missing_words(points)
    self.draw(points, self.target)
    return self
|
The fit method is the primary drawing input for the dispersion
visualization.
Parameters
----------
X : list or generator
Should be provided as a list of documents or a generator
that yields a list of documents that contain a list of
words in the order they appear in the document.
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
|
def pypi():
    '''Build package and upload to pypi.'''
    # NOTE(review): flo() interpolates from the caller's locals, so the
    # local names below (basedir, highest_minor, python) are load-bearing
    # inside the flo('...') strings -- do not rename them.
    if query_yes_no('version updated in setup.py?'):
        print(cyan('\n## clean-up\n'))
        execute(clean)
        basedir = dirname(__file__)
        # Pick the newest locally available Python minor version to build with.
        latest_pythons = _determine_latest_pythons()
        # e.g. highest_minor: '3.6'
        highest_minor = _highest_minor(latest_pythons)
        python = flo('python{highest_minor}')
        print(cyan('\n## build package'))
        _local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
        print(cyan('\n## upload package'))
        local(flo('cd {basedir} && {python} -m twine upload dist/*'))
|
Build package and upload to pypi.
|
def verify_docker_image_sha(chain, link):
    """Verify that built docker shas match the artifact.

    Args:
        chain (ChainOfTrust): the chain we're operating on.
        link (LinkOfTrust): the task link we're checking.

    Raises:
        CoTError: on failure.
    """
    cot = link.cot
    task = link.task
    # Errors are accumulated and raised in one batch at the end.
    errors = []
    if isinstance(task['payload'].get('image'), dict):
        # Using pre-built image from docker-image task
        docker_image_task_id = task['extra']['chainOfTrust']['inputs']['docker-image']
        log.debug("Verifying {} {} against docker-image {}".format(
            link.name, link.task_id, docker_image_task_id
        ))
        # The taskId declared in chain-of-trust inputs must match the one
        # in the payload, otherwise the chain is inconsistent.
        if docker_image_task_id != task['payload']['image']['taskId']:
            errors.append("{} {} docker-image taskId isn't consistent!: {} vs {}".format(
                link.name, link.task_id, docker_image_task_id,
                task['payload']['image']['taskId']
            ))
        else:
            path = task['payload']['image']['path']
            # we need change the hash alg everywhere if we change, and recreate
            # the docker images...
            image_hash = cot['environment']['imageArtifactHash']
            alg, sha = image_hash.split(':')
            docker_image_link = chain.get_link(docker_image_task_id)
            # Compare the sha recorded by the upstream docker-image task
            # against the one this task claims to have used.
            upstream_sha = docker_image_link.cot['artifacts'].get(path, {}).get(alg)
            if upstream_sha is None:
                errors.append("{} {} docker-image docker sha {} is missing! {}".format(
                    link.name, link.task_id, alg,
                    docker_image_link.cot['artifacts'][path]
                ))
            elif upstream_sha != sha:
                errors.append("{} {} docker-image docker sha doesn't match! {} {} vs {}".format(
                    link.name, link.task_id, alg, sha, upstream_sha
                ))
            else:
                log.debug("Found matching docker-image sha {}".format(upstream_sha))
    else:
        # Not built by a docker-image task: only certain task types are
        # allowed to use a prebuilt docker image.
        prebuilt_task_types = chain.context.config['prebuilt_docker_image_task_types']
        if prebuilt_task_types != "any" and link.task_type not in prebuilt_task_types:
            errors.append(
                "Task type {} not allowed to use a prebuilt docker image!".format(
                    link.task_type
                )
            )
    raise_on_errors(errors)
|
Verify that built docker shas match the artifact.
Args:
chain (ChainOfTrust): the chain we're operating on.
link (LinkOfTrust): the task link we're checking.
Raises:
CoTError: on failure.
|
def write_to_file(self, file_path='', date=(datetime.date.today()),
                  organization='llnl'):
    """
    Write stargazers data to a CSV file.

    :param file_path: destination path; created/truncated on open.
    :param date: unused in this body; kept for interface compatibility.
        NOTE(review): the default is evaluated once at import time, and the
        header advertises a `date` column while rows contain only
        (star, count) -- confirm the intended file format.
    :param organization: unused in this body; see the note for `date`.
    """
    with open(file_path, 'w+') as out:
        out.write('date,organization,stargazers\n')
        # NOTE(review): the original comment claimed a lowercase sort, but
        # no key was applied; the case-sensitive sort is preserved here.
        for star in sorted(self.stargazers):
            out.write(star + ',' + str(self.stargazers[star]) + '\n')
        # BUGFIX(minor): removed the redundant out.close() -- the with
        # block already closes the file.
|
Writes stargazers data to file.
|
def is_text(bytesio):
    """Return whether the first KB of contents seems to be text.

    (BUGFIX: the previous docstring said "binary", contradicting the
    function name and return value -- True means text-like.)

    This is roughly based on libmagic's binary/text detection:
    https://github.com/file/file/blob/df74b09b9027676088c797528edcaae5a9ce9ad0/src/encoding.c#L203-L228
    """
    # Bytes considered "text": a few control chars (BEL..CR, ESC),
    # printable ASCII, and everything >= 0x80 (possible UTF-8/latin bytes).
    text_chars = (
        bytearray([7, 8, 9, 10, 11, 12, 13, 27]) +
        bytearray(range(0x20, 0x7F)) +
        bytearray(range(0x80, 0x100))
    )
    # translate(None, text_chars) deletes every text byte; any leftovers
    # are non-text control bytes, so the content is considered binary.
    return not bool(bytesio.read(1024).translate(None, text_chars))
|
Return whether the first KB of contents seems to be text (True) rather than binary.
This is roughly based on libmagic's binary/text detection:
https://github.com/file/file/blob/df74b09b9027676088c797528edcaae5a9ce9ad0/src/encoding.c#L203-L228
|
def load(self, file_key):
    """Load the selected dataset as a float32 DataArray with fill values
    masked out and the scale factor (when present) applied."""
    var = self.sd.select(file_key)
    data = xr.DataArray(from_sds(var, chunks=CHUNK_SIZE),
                        dims=['y', 'x']).astype(np.float32)
    # Mask out fill values before any scaling.
    data = data.where(data != var._FillValue)
    # Apply the scale factor only when the SDS variable defines one;
    # sentinel distinguishes "missing attribute" from any real value.
    _missing = object()
    scale = getattr(var, 'scale_factor', _missing)
    if scale is not _missing:
        data = data * np.float32(scale)
    return data
|
Load the data.
|
def from_url(cls, url):
    """
    Given a resource uri, return an instance of that cache initialized with
    the given parameters.

    The scheme selects the cache backend (e.g. ``memory://``, ``redis://``);
    the path is parsed by the backend, and query parameters become keyword
    arguments, e.g. ``redis://localhost:10/1?pool_min_size=1``.

    :param url: string identifying the resource uri of the cache to connect to
    """
    parsed = urllib.parse.urlparse(url)
    kwargs = dict(urllib.parse.parse_qsl(parsed.query))
    cache_class = Cache.get_scheme_class(parsed.scheme)
    # The path component is backend-specific (e.g. the redis db index).
    if parsed.path:
        kwargs.update(cache_class.parse_uri_path(parsed.path))
    # Netloc pieces map onto the backend's connection kwargs.
    if parsed.hostname:
        kwargs["endpoint"] = parsed.hostname
    if parsed.port:
        kwargs["port"] = parsed.port
    if parsed.password:
        kwargs["password"] = parsed.password
    return Cache(cache_class, **kwargs)
|
Given a resource uri, return an instance of that cache initialized with the given
parameters. An example usage:
>>> from aiocache import Cache
>>> Cache.from_url('memory://')
<aiocache.backends.memory.SimpleMemoryCache object at 0x1081dbb00>
a more advanced usage using queryparams to configure the cache:
>>> from aiocache import Cache
>>> cache = Cache.from_url('redis://localhost:10/1?pool_min_size=1')
>>> cache
RedisCache (localhost:10)
>>> cache.db
1
>>> cache.pool_min_size
1
:param url: string identifying the resource uri of the cache to connect to
|
def draw_on_image(self, image,
                  color=(0, 255, 0), color_lines=None, color_points=None,
                  alpha=1.0, alpha_lines=None, alpha_points=None,
                  size=1, size_lines=None, size_points=None,
                  antialiased=True,
                  raise_if_out_of_image=False):
    """Draw every line string of this collection onto *image*.

    Parameters
    ----------
    image : ndarray
        The ``(H,W,C)`` ``uint8`` image onto which to draw.
    color : iterable of int, optional
        RGB color; line and point colors are derived from it unless
        ``color_lines`` / ``color_points`` are given.
    color_lines : None or iterable of int
        RGB color of the line segments; derived from `color` if ``None``.
    color_points : None or iterable of int
        RGB color of the points; derived from ``0.5 * color`` if ``None``.
    alpha : float, optional
        Overall opacity; line and point alphas are derived from it
        unless set explicitly.
    alpha_lines : None or float, optional
        Opacity of the line segments; derived from `alpha` if ``None``.
    alpha_points : None or float, optional
        Opacity of the points; derived from `alpha` if ``None``.
    size : int, optional
        Overall size; line and point sizes are derived from it unless
        set explicitly.
    size_lines : None or int, optional
        Thickness of the line segments; derived from `size` if ``None``.
    size_points : None or int, optional
        Size of the points in pixels; derived from ``3 * size`` if
        ``None``.
    antialiased : bool, optional
        Whether to draw the lines with anti-aliasing activated.
        This does currently not affect the point drawing.
    raise_if_out_of_image : bool, optional
        Whether to raise an error if a line string is fully outside of
        the image. If ``False``, only the parts inside the image are
        drawn.

    Returns
    -------
    ndarray
        Image with line strings drawn on it.
    """
    # TODO improve efficiency here by copying only once
    result = image
    for line_string in self.line_strings:
        result = line_string.draw_on_image(
            result,
            color=color, color_lines=color_lines, color_points=color_points,
            alpha=alpha, alpha_lines=alpha_lines, alpha_points=alpha_points,
            size=size, size_lines=size_lines, size_points=size_points,
            antialiased=antialiased,
            raise_if_out_of_image=raise_if_out_of_image)
    return result
|
Draw all line strings onto a given image.
Parameters
----------
image : ndarray
The `(H,W,C)` `uint8` image onto which to draw the line strings.
color : iterable of int, optional
Color to use as RGB, i.e. three values.
The color of the lines and points are derived from this value,
unless they are set.
color_lines : None or iterable of int
Color to use for the line segments as RGB, i.e. three values.
If ``None``, this value is derived from `color`.
color_points : None or iterable of int
Color to use for the points as RGB, i.e. three values.
If ``None``, this value is derived from ``0.5 * color``.
alpha : float, optional
Opacity of the line strings. Higher values denote more visible
points.
The alphas of the line and points are derived from this value,
unless they are set.
alpha_lines : None or float, optional
Opacity of the line strings. Higher values denote more visible
line string.
If ``None``, this value is derived from `alpha`.
alpha_points : None or float, optional
Opacity of the line string points. Higher values denote more
visible points.
If ``None``, this value is derived from `alpha`.
size : int, optional
Size of the line strings.
The sizes of the line and points are derived from this value,
unless they are set.
size_lines : None or int, optional
Thickness of the line segments.
If ``None``, this value is derived from `size`.
size_points : None or int, optional
Size of the points in pixels.
If ``None``, this value is derived from ``3 * size``.
antialiased : bool, optional
Whether to draw the lines with anti-aliasing activated.
This does currently not affect the point drawing.
raise_if_out_of_image : bool, optional
Whether to raise an error if a line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Image with line strings drawn on it.
|
def _get_plugin_module_paths(self, plugin_dir):
''' Return a list of every module in `plugin_dir`. '''
filepaths = [
fp for fp in glob.glob('{}/**/*.py'.format(plugin_dir), recursive=True)
if not fp.endswith('__init__.py')
]
rel_paths = [re.sub(plugin_dir.rstrip('/') + '/', '', fp) for fp in filepaths]
module_paths = [rp.replace('/', '.').replace('.py', '') for rp in rel_paths]
return module_paths
|
Return a list of every module in `plugin_dir`.
|
def check_learns_zero_output_rnn(model, sgd, X, Y, initial_hidden=None):
    """Check we can learn to output a zero vector.

    Performs one update, then 1000 more, printing abs(sum(Yh)) after
    each forward pass; the printed values should decrease as training
    proceeds.
    """
    (Yh, h_n), get_dX = model.begin_update(X, initial_hidden)
    get_dX((Yh - Y, h_n), sgd=sgd)
    print(numpy.abs(Yh.sum()))
    for _ in range(1000):
        (Yh, h_n), get_dX = model.begin_update(X)
        current_sum = numpy.abs(Yh.sum())
        get_dX((Yh - Y, h_n), sgd=sgd)
        # Should have decreased
        print(current_sum)
|
Check we can learn to output a zero vector
|
def uri(self, value):
    """Set a new uri value in the record.

    This only rewrites the uri field inside the record's JSON; it will
    not change the location of the underlying file.
    """
    jsonpointer.set_pointer(self.record, self.pointer, value)
|
Set new uri value in record.
It will not change the location of the underlying file!
|
def A(g, i):
    """Recursively construct the A line for g; i = len(g)-1.

    The result is built bottom-up: each level either doubles the lower
    half or prepends a run of ones, depending on bit i of g.
    """
    bit = g & (1 << i)
    if not i:
        # Base case: a two-bit pattern selected by the low bit of g.
        return 0b00 if bit else 0b10
    half = Awidth(i)
    lower = A(g, i - 1)
    if bit:
        return lower << half | lower
    # (1 << half) - 1 is a run of `half` one-bits.
    return ((1 << half) - 1) << half | lower
|
recursively constructs A line for g; i = len(g)-1
|
def close(self):
    """Close every open file handle tracked by this object."""
    for tif_file in self._files.values():
        tif_file.filehandle.close()
    # Drop all references so the handles cannot be reused.
    self._files = {}
|
Close open file handle(s).
|
def _set_bottomMargin(self, value):
    """Shift the object so its bottom margin equals *value* (int or float).

    The object is moved vertically by the difference and its height is
    adjusted by the same amount.

    Subclasses may override this method.
    """
    delta = value - self.bottomMargin
    self.moveBy((0, delta))
    self.height += delta
|
value will be an int or float.
Subclasses may override this method.
|
def clear_high_level_pars(self):
    """
    Clear all high-level mean-statistics display boxes.

    Empties every ``*_window`` text box in the main window and, when
    the interpretation editor is open, the matching boxes there too,
    then resets the mean-statistics colors.
    """
    stat_names = ['mean_type', 'dec', 'inc', 'alpha95',
                  'K', 'R', 'n_lines', 'n_planes']
    # getattr() replaces the previous exec() string-building, which was
    # slower and fragile (and a needless use of exec).
    for name in stat_names:
        getattr(self, '%s_window' % name).SetValue("")
    if self.ie_open:
        for name in stat_names:
            getattr(self.ie, '%s_window' % name).SetValue("")
    self.set_mean_stats_color()
|
clears all high level pars display boxes
|
def serialize_instance(instance):
    '''
    Serialize an *instance* from a metamodel into an SQL-style
    INSERT statement, one attribute per line with a trailing
    "-- name : type" comment.
    '''
    metaclass = xtuml.get_metaclass(instance)
    total = len(metaclass.attributes)
    parts = []
    for position, (name, ty) in enumerate(metaclass.attributes, start=1):
        value = serialize_value(getattr(instance, name), ty)
        # Every attribute but the last is followed by a comma.
        separator = ',' if position < total else ''
        parts.append('\n    %s%s -- %s : %s' % (value, separator, name, ty))
    return 'INSERT INTO %s VALUES (%s\n);\n' % (metaclass.kind, ''.join(parts))
|
Serialize an *instance* from a metamodel.
|
def set_form_widgets_attrs(form, attrs):
    """Apply the given HTML attributes to every field widget of a form.

    Values in *attrs* may be callables; each callable is invoked with
    the field and its return value is used as the attribute value.

    Example:
        set_form_widgets_attrs(my_form, {'class': 'clickable'})

    :param form: form whose field widgets are updated.
    :param attrs: mapping of attribute name -> value (or callable).
    """
    # .values() instead of .items(): the field name was never used.
    for field in form.fields.values():
        # Resolve callables per field, starting from a fresh dict so one
        # field's resolved values never leak into another's.
        resolved = {
            name: (val(field) if callable(val) else val)
            for name, val in attrs.items()
        }
        field.widget.attrs = field.widget.build_attrs(resolved)
|
Applies a given HTML attributes to each field widget of a given form.
Example:
set_form_widgets_attrs(my_form, {'class': 'clickable'})
|
def create_unique_transfer_operation_id(src_ase, dst_ase):
    # type: (blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.azure.StorageEntity) -> str
    """Create a unique transfer operation id.

    The id is the source endpoint/path and destination endpoint/path
    joined with semicolons.

    :param blobxfer.models.azure.StorageEntity src_ase: src storage entity
    :param blobxfer.models.azure.StorageEntity dst_ase: dst storage entity
    :rtype: str
    :return: unique transfer id
    """
    components = [
        src_ase._client.primary_endpoint,
        src_ase.path,
        dst_ase._client.primary_endpoint,
        dst_ase.path,
    ]
    return ';'.join(components)
|
Create a unique transfer operation id
:param blobxfer.models.azure.StorageEntity src_ase: src storage entity
:param blobxfer.models.azure.StorageEntity dst_ase: dst storage entity
:rtype: str
:return: unique transfer id
|
def dyn_attr(self, dev_list):
    """Invoked to create dynamic attributes for the given devices.

    Default implementation calls each device's
    ``initialize_dynamic_attributes`` method when it exists; failures
    are logged on the device, never raised.

    :param dev_list: list of devices
    :type dev_list: :class:`tango.DeviceImpl`
    """
    for device in dev_list:
        initializer = getattr(device,
                              "initialize_dynamic_attributes",
                              None)
        # Skip devices that do not provide a (truthy) callable hook.
        if not (initializer and callable(initializer)):
            continue
        try:
            initializer()
        except Exception:
            device.warn_stream("Failed to initialize dynamic attributes")
            device.debug_stream("Details: " + traceback.format_exc())
|
Invoked to create dynamic attributes for the given devices.
Default implementation calls
:meth:`TT.initialize_dynamic_attributes` for each device
:param dev_list: list of devices
:type dev_list: :class:`tango.DeviceImpl`
|
def get_parcel(resource_root, product, version, cluster_name="default"):
    """
    Lookup a parcel by product name and version.
    @param resource_root: The root Resource object.
    @param product: Parcel product name
    @param version: Parcel version
    @param cluster_name: Cluster name
    @return: An ApiService object
    """
    parcel_path = PARCEL_PATH % (cluster_name, product, version)
    return _get_parcel(resource_root, parcel_path)
|
Lookup a parcel by name
@param resource_root: The root Resource object.
@param product: Parcel product name
@param version: Parcel version
@param cluster_name: Cluster name
@return: An ApiService object
|
def LdKL_dot(self, v, v1=None):
    """
    Implements L(∂K)Lᵀv.

    The array v can have one or two dimensions and the first dimension
    has to have size n⋅p.

    Let vec(V) = v. We have
    L(∂K)Lᵀ⋅v = ((Lₕ∂C₀Lₕᵀ) ⊗ (LₓGGᵀLₓᵀ))vec(V) = vec(LₓGGᵀLₓᵀVLₕ∂C₀Lₕᵀ),
    when the derivative is over the parameters of C₀. Similarly,
    L(∂K)Lᵀv = ((Lₕ∂C₁Lₕᵀ) ⊗ (LₓLₓᵀ))vec(V) = vec(LₓLₓᵀVLₕ∂C₁Lₕᵀ),
    over the parameters of C₁.

    Returns a dict with one entry per parameter block ("C0.Lu" and
    "C1.Lu"), each holding the product for every scalar parameter
    stacked along the last axis.

    NOTE(review): ``v1`` is unused; presumably kept for interface
    compatibility — confirm against callers.
    """
    self._init_svd()
    # Helper: contract a's axis 1 with b's axis 0; when `a` carries an
    # extra trailing axis the result is transposed so the leading axis
    # of `a` stays first.
    def dot(a, b):
        r = tensordot(a, b, axes=([1], [0]))
        if a.ndim > b.ndim:
            return r.transpose([0, 2, 1])
        return r
    Lh = self.Lh
    # Undo the vectorization: reshape v back into matrix form V with
    # the row dimension matching Lx (any extra axes of v trail behind).
    V = unvec(v, (self.Lx.shape[0], -1) + v.shape[1:])
    # Pre-allocate one output slot per scalar parameter of C0 and C1.
    LdKL_dot = {
        "C0.Lu": empty((v.shape[0],) + v.shape[1:] + (self._C0.Lu.shape[0],)),
        "C1.Lu": empty((v.shape[0],) + v.shape[1:] + (self._C1.Lu.shape[0],)),
    }
    dC0 = self._C0.gradient()["Lu"]
    for i in range(self._C0.Lu.shape[0]):
        # vec(LₓG GᵀLₓᵀ V Lₕ∂C₀Lₕᵀ) — see the first docstring formula.
        t = dot(self._LxG, dot(self._LxG.T, dot(V, Lh @ dC0[..., i] @ Lh.T)))
        # Re-vectorize column-major (order="F") to match vec().
        LdKL_dot["C0.Lu"][..., i] = t.reshape((-1,) + t.shape[2:], order="F")
    dC1 = self._C1.gradient()["Lu"]
    for i in range(self._C1.Lu.shape[0]):
        # vec(V Lₕ∂C₁Lₕᵀ) — NOTE(review): the LₓLₓᵀ factor from the
        # docstring formula is not applied here; presumably it is the
        # identity in the SVD basis — confirm.
        t = dot(V, Lh @ dC1[..., i] @ Lh.T)
        LdKL_dot["C1.Lu"][..., i] = t.reshape((-1,) + t.shape[2:], order="F")
    return LdKL_dot
|
Implements L(∂K)Lᵀv.
The array v can have one or two dimensions and the first dimension has to have
size n⋅p.
Let vec(V) = v. We have
L(∂K)Lᵀ⋅v = ((Lₕ∂C₀Lₕᵀ) ⊗ (LₓGGᵀLₓᵀ))vec(V) = vec(LₓGGᵀLₓᵀVLₕ∂C₀Lₕᵀ),
when the derivative is over the parameters of C₀. Similarly,
L(∂K)Lᵀv = ((Lₕ∂C₁Lₕᵀ) ⊗ (LₓLₓᵀ))vec(V) = vec(LₓLₓᵀVLₕ∂C₁Lₕᵀ),
over the parameters of C₁.
|
def make_channel(name, samples, data=None, verbose=False):
    """
    Create a Channel from a list of Samples.

    The channel name is prefixed with "channel_" to avoid a segfault
    when *name* begins with a digit. Optional *data* is attached before
    the samples are added.
    """
    if verbose:
        llog = log['make_channel']
        llog.info("creating channel {0}".format(name))
    channel = Channel('channel_{0}'.format(name))
    channel.SetStatErrorConfig(0.05, "Poisson")
    if data is not None:
        if verbose:
            llog.info("setting data")
        channel.SetData(data)
    for sample in samples:
        if verbose:
            llog.info("adding sample {0}".format(sample.GetName()))
        channel.AddSample(sample)
    return channel
|
Create a Channel from a list of Samples
|
def OpenEnumerateInstancePaths(self, ClassName, namespace=None,
                               FilterQueryLanguage=None, FilterQuery=None,
                               OperationTimeout=None, ContinueOnError=None,
                               MaxObjectCount=None, **extra):
    # pylint: disable=invalid-name
    """
    Open an enumeration session to enumerate the instance paths of
    instances of a class (including instances of its subclasses) in
    a namespace.

    *New in pywbem 0.9.*

    This method performs the OpenEnumerateInstancePaths operation (see
    :term:`DSP0200`). If the operation succeeds, it returns status on
    the enumeration session and optionally instance paths; otherwise it
    raises an exception. Use
    :meth:`~pywbem.WBEMConnection.PullInstancePaths` to retrieve the
    next set of instance paths, or
    :meth:`~pywbem.WBEMConnection.CloseEnumeration` to close the
    session before it is exhausted.

    Parameters:

      ClassName (:term:`string` or :class:`~pywbem.CIMClassName`):
        Name of the class to be enumerated (case independent). When a
        :class:`~pywbem.CIMClassName` is given, its `namespace`
        attribute serves as a default namespace and its `host`
        attribute is ignored.

      namespace (:term:`string`):
        Name of the CIM namespace (case independent). If `None`, the
        namespace of the `ClassName` parameter is used if available;
        otherwise the connection's default namespace.

      FilterQueryLanguage (:term:`string`):
        Name of the filter query language for `FilterQuery` (the
        DMTF-defined Filter Query Language is "DMTF:FQL"). Not all WBEM
        servers support filtering for this operation.

      FilterQuery (:term:`string`):
        The filter query in the language named by
        `FilterQueryLanguage`.

      OperationTimeout (:class:`~pywbem.Uint32`):
        Minimum time in seconds the server shall keep the enumeration
        session open after a previous Open or Pull request. 0 proposes
        "never time out"; the server may reject the proposal with
        :attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`. If `None`,
        the server default is used.

      ContinueOnError (:class:`py:bool`):
        Whether the server should continue sending responses after an
        error response. Servers without continuation support raise
        :attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`
        when `True`. If `None`, the server default (`False` per
        :term:`DSP0200`) applies.

      MaxObjectCount (:class:`~pywbem.Uint32`):
        Maximum number of instances the server may return for this
        request. Zero requests no instances; `None` uses the server
        default (zero per :term:`DSP0200`).

      **extra :
        Additional keyword arguments passed as additional operation
        parameters to the WBEM server (:term:`DSP0200` defines none).

    Returns:

      A :func:`~py:collections.namedtuple` with items:

      * **paths** (:class:`py:list` of
        :class:`~pywbem.CIMInstanceName`): the retrieved instance
        paths, with `classname`, `keybindings`, `namespace` and `host`
        set.
      * **eos** (:class:`py:bool`): `True` if the enumeration session
        is exhausted (and closed by the server); `False` otherwise.
      * **context** (:func:`py:tuple` of server_context, namespace):
        opaque context identifying the open enumeration session; must
        be supplied to the next pull or close operation.

    Raises:

      Exceptions described in :class:`~pywbem.WBEMConnection`.

    Example::

        max_object_count = 100
        rslt_tuple = conn.OpenEnumerateInstancePaths(
            'CIM_Blah', MaxObjectCount=max_object_count)
        paths = rslt_tuple.paths
        while not rslt_tuple.eos:
            rslt_tuple = conn.PullInstancePaths(rslt_tupl.context,
                                                max_object_count)
            paths.extend(rslt_tupl.paths)
        for path in paths:
            print('path {0}'.format(path))
    """
    # exc/result_tuple are captured for the statistics/recorder hooks
    # in the finally clause below.
    exc = None
    result_tuple = None
    method_name = 'OpenEnumerateInstancePaths'
    if self._operation_recorders:
        # Record the call arguments before issuing the request.
        self.operation_recorder_reset(pull_op=True)
        self.operation_recorder_stage_pywbem_args(
            method=method_name,
            ClassName=ClassName,
            namespace=namespace,
            FilterQueryLanguage=FilterQueryLanguage,
            FilterQuery=FilterQuery,
            OperationTimeout=OperationTimeout,
            ContinueOnError=ContinueOnError,
            MaxObjectCount=MaxObjectCount,
            **extra)
    try:
        # NOTE(review): if start_timer() itself raised, `stats` would be
        # unbound in the finally clause — presumably it cannot raise;
        # confirm.
        stats = self.statistics.start_timer(method_name)
        # Resolve the effective namespace: explicit arg, then the
        # CIMClassName's namespace, then the connection default.
        if namespace is None and isinstance(ClassName, CIMClassName):
            namespace = ClassName.namespace
        namespace = self._iparam_namespace_from_namespace(namespace)
        classname = self._iparam_classname(ClassName, 'ClassName')
        result = self._imethodcall(
            method_name,
            namespace,
            ClassName=classname,
            FilterQueryLanguage=FilterQueryLanguage,
            FilterQuery=FilterQuery,
            OperationTimeout=OperationTimeout,
            ContinueOnError=ContinueOnError,
            MaxObjectCount=MaxObjectCount,
            has_out_params=True,
            **extra)
        # Wrap (paths, eos, context) into the named result tuple.
        result_tuple = pull_path_result_tuple(
            *self._get_rslt_params(result, namespace))
        return result_tuple
    except (CIMXMLParseError, XMLParseError) as exce:
        # Attach the raw request/reply to parse errors for diagnosis.
        exce.request_data = self.last_raw_request
        exce.response_data = self.last_raw_reply
        exc = exce
        raise
    except Exception as exce:
        exc = exce
        raise
    finally:
        # Always record timing and (optionally) the recorder result,
        # whether the operation succeeded or raised.
        self._last_operation_time = stats.stop_timer(
            self.last_request_len, self.last_reply_len,
            self.last_server_response_time, exc)
        if self._operation_recorders:
            self.operation_recorder_stage_result(result_tuple, exc)
|
Open an enumeration session to enumerate the instance paths of
instances of a class (including instances of its subclasses) in
a namespace.
*New in pywbem 0.9.*
This method performs the OpenEnumerateInstancePaths operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
If the operation succeeds, this method returns status on the
enumeration session and optionally instance paths.
Otherwise, this method raises an exception.
Use the :meth:`~pywbem.WBEMConnection.PullInstancePaths` method to
retrieve the next set of instance paths or the
:meth:`~pywbem.WBEMConnection.CloseEnumeration` method to close the
enumeration session before it is exhausted.
Parameters:
ClassName (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class to be enumerated (case independent).
If specified as a :class:`~pywbem.CIMClassName` object, its
`namespace` attribute will be used as a default namespace as
described for the `namespace` parameter, and its `host` attribute
will be ignored.
namespace (:term:`string`):
Name of the CIM namespace to be used (case independent).
Leading and trailing slash characters will be stripped. The lexical
case will be preserved.
If `None`, the namespace of the `ClassName` parameter will be used,
if specified as a :class:`~pywbem.CIMClassName` object. If that is
also `None`, the default namespace of the connection will be used.
FilterQueryLanguage (:term:`string`):
The name of the filter query language used for the `FilterQuery`
parameter. The DMTF-defined Filter Query Language (see
:term:`DSP0212`) is specified as "DMTF:FQL".
Not all WBEM servers support filtering for this operation because
it returns instance paths and the act of the server filtering
requires that it generate instances just for that purpose and then
discard them.
FilterQuery (:term:`string`):
The filter query in the query language defined by the
`FilterQueryLanguage` parameter.
OperationTimeout (:class:`~pywbem.Uint32`):
Minimum time in seconds the WBEM Server shall maintain an open
enumeration session after a previous Open or Pull request is
sent to the client. Once this timeout time has expired, the
WBEM server may close the enumeration session.
* If not `None`, this parameter is sent to the WBEM server as the
proposed timeout for the enumeration session. A value of 0
indicates that the server is expected to never time out. The
server may reject the proposed value, causing a
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default timeout to be used.
ContinueOnError (:class:`py:bool`):
Indicates to the WBEM server to continue sending responses
after an error response has been sent.
* If `True`, the server is to continue sending responses after
sending an error response. Not all servers support continuation
on error; a server that does not support it must send an error
response if `True` was specified, causing
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
* If `False`, the server is requested to close the enumeration after
sending an error response.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
`False`.
MaxObjectCount (:class:`~pywbem.Uint32`)
Maximum number of instances the WBEM server may return
for this request.
* If positive, the WBEM server is to return no more than the
specified number of instances.
* If zero, the WBEM server is to return no instances. This may
be used by a client to leave the handling of any returned
instances to a loop of Pull operations.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
to return zero instances.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Returns:
A :func:`~py:collections.namedtuple` object containing the following
named items:
* **paths** (:class:`py:list` of :class:`~pywbem.CIMInstanceName`):
Representations of the retrieved instance paths, with their
attributes set as follows:
* `classname`: Name of the creation class of the instance.
* `keybindings`: Keybindings of the instance.
* `namespace`: Name of the CIM namespace containing the instance.
* `host`: Host and optionally port of the WBEM server containing
the CIM namespace.
* **eos** (:class:`py:bool`):
Indicates whether the enumeration session is exhausted after
this operation:
- If `True`, the enumeration session is exhausted, and the
server has closed the enumeration session.
- If `False`, the enumeration session is not exhausted and the
`context` item is the context object for the next operation on
the enumeration session.
* **context** (:func:`py:tuple` of server_context, namespace):
A context object identifying the open enumeration session,
including its current enumeration state, and the namespace. This
object must be supplied with the next pull or close operation for
this enumeration session.
The tuple items are:
* server_context (:term:`string`):
Enumeration context string returned by the server if
the session is not exhausted, or `None` otherwise. This string
is opaque for the client.
* namespace (:term:`string`):
Name of the CIM namespace that was used for this operation.
NOTE: This inner tuple hides the need for a CIM namespace
on subsequent operations in the enumeration session. CIM
operations always require target namespace, but it never
makes sense to specify a different one in subsequent
operations on the same enumeration session.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
Example::
max_object_count = 100
rslt_tuple = conn.OpenEnumerateInstancePaths(
'CIM_Blah', MaxObjectCount=max_object_count)
paths = rslt_tuple.paths
while not rslt_tuple.eos:
rslt_tuple = conn.PullInstancePaths(rslt_tupl.context,
max_object_count)
paths.extend(rslt_tupl.paths)
for path in paths:
print('path {0}'.format(path))
|
def move_right(self):
    """Make the drone move right.

    Sends a progressive command with the configured speed; presumably
    the pcmd arguments are (flag, roll, pitch, gaz, yaw) — confirm
    against ardrone.at.pcmd.
    """
    self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
|
Make the drone move right.
|
def _init_hex(self, hexval: str) -> None:
    """Initialize color attributes from a hex value string."""
    # Normalize first, then derive the terminal code and RGB triple
    # from the normalized hex value.
    normalized = fix_hex(hexval)
    self.hexval = hex2termhex(normalized)
    self.code = hex2term(self.hexval)
    self.rgb = hex2rgb(self.hexval)
|
Initialize from a hex value string.
|
def separated(p, sep, mint, maxt=None, end=None):
    '''Repeat a parser `p` separated by parser `sep` between `mint` and
    `maxt` times.

    When `end` is None, a trailing separator is optional.
    When `end` is True, a trailing separator is required.
    When `end` is False, a trailing separator is not allowed.
    MATCHES AS MUCH AS POSSIBLE.

    Return list of values returned by `p`.'''
    # NOTE(review): a falsy maxt (0 or None) falls back to mint, so
    # maxt=0 cannot request "zero repetitions" — confirm intended.
    maxt = maxt if maxt else mint
    @Parser
    def sep_parser(text, index):
        # cnt: number of `p` matches so far; values: accumulated result.
        cnt, values, res = 0, Value.success(index, []), None
        while cnt < maxt:
            # Between elements (not before the first), consume one
            # separator; with end=True the separator is instead consumed
            # after each element further below.
            if end in [False, None] and cnt > 0:
                res = sep(text, index)
                if res.status:  # `sep` found, consume it (advance index)
                    index, values = res.index, Value.success(
                        res.index, values.value)
                elif cnt < mint:
                    return res  # error: need more elements, but no `sep` found.
                else:
                    break
            res = p(text, index)
            if res.status:
                values = values.aggregate(
                    Value.success(res.index, [res.value]))
                index, cnt = res.index, cnt + 1
            elif cnt >= mint:
                break
            else:
                return res  # error: need more elements, but no `p` found.
            if end is True:
                # A trailing separator is mandatory after every element.
                res = sep(text, index)
                if res.status:
                    index, values = res.index, Value.success(
                        res.index, values.value)
                else:
                    return res  # error: trailing `sep` not found
            if cnt >= maxt:
                break
        return values
    return sep_parser
|
Repeat a parser `p` separated by `s` between `mint` and `maxt` times.
When `end` is None, a trailing separator is optional.
When `end` is True, a trailing separator is required.
When `end` is False, a trailing separator is not allowed.
MATCHES AS MUCH AS POSSIBLE.
Return list of values returned by `p`.
|
def get_submission_filenames(self, tournament=None, round_num=None):
    """Get filenames of the submissions of the user.

    Args:
        tournament (int): optionally filter by ID of the tournament
        round_num (int): optionally filter by round number

    Returns:
        list: list of submission descriptions (`dict`), sorted by
        round number and tournament.

        Each filename dict in the list has the following structure:

         * filename (`str`)
         * round_num (`int`)
         * tournament (`int`)

    Example:
        >>> NumerAPI().get_submission_filenames(3, 111)
        [{'filename': 'model57-dMpHpYMPIUAF.csv',
          'round_num': 111,
          'tournament': 3}]
    """
    # GraphQL query: fetch all submissions with their round info.
    query = '''
      query {
        user {
          submissions {
            filename
            selected
            round {
               tournament
               number
            }
          }
        }
      }
    '''
    data = self.raw_query(query, authorization=True)['data']['user']
    # Only submissions flagged as `selected` count.
    filenames = [{"round_num": item['round']['number'],
                  "tournament": item['round']['tournament'],
                  "filename": item['filename']}
                 for item in data['submissions'] if item['selected']]
    if round_num is not None:
        filenames = [f for f in filenames if f['round_num'] == round_num]
    if tournament is not None:
        filenames = [f for f in filenames if f['tournament'] == tournament]
    filenames.sort(key=lambda f: (f['round_num'], f['tournament']))
    return filenames
|
Get filenames of the submission of the user.
Args:
tournament (int): optionally filter by ID of the tournament
round_num (int): optionally filter round number
Returns:
list: list of user filenames (`dict`)
Each filename in the list has the following structure:
* filename (`str`)
* round_num (`int`)
* tournament (`int`)
Example:
>>> NumerAPI().get_submission_filenames(3, 111)
[{'filename': 'model57-dMpHpYMPIUAF.csv',
'round_num': 111,
'tournament': 3}]
|
def window_size(self):
    """Return the render window size as a (width, height) tuple."""
    size = self.app_window.baseSize()
    return (size.width(), size.height())
|
returns render window size
|
def Trim(lst, limit):
    """Trims a given list in place so it holds at most `limit` elements.

    Args:
        lst: A list to trim (modified in place).
        limit: Maximum number of elements to keep; negative values are
            treated as 0.

    Returns:
        The suffix of the input list that was removed.
    """
    keep = max(0, limit)
    removed = lst[keep:]
    # Slice assignment truncates the original list object in place.
    lst[keep:] = []
    return removed
|
Trims a given list so that it is not longer than given limit.
Args:
lst: A list to trim.
limit: A maximum number of elements in the list after trimming.
Returns:
A suffix of the input list that was trimmed.
|
def validate_username(username):
    """Validate the new username.

    Raises :py:exc:`UsernameInvalid` when the username is not all
    lowercase, is shorter than 2 characters, or fails the username
    regex.

    :param username: Username to validate.
    :return: the validated username, unchanged.
    """
    # Simple checks first, in order, so the first failure wins.
    checks = (
        (username.islower(), six.u('Username must be all lowercase')),
        (len(username) >= 2, six.u('Username must be at least 2 characters')),
    )
    for ok, message in checks:
        if not ok:
            raise UsernameInvalid(message)
    if not username_re.search(username):
        raise UsernameInvalid(settings.USERNAME_VALIDATION_ERROR_MSG)
    return username
|
Validate the new username. If the username is invalid, raises
:py:exc:`UsernameInvalid`.
:param username: Username to validate.
|
def switch(self):
    """Activate extended ACE controllers whose start time has arrived."""
    t = self.system.dae.t
    for idx in range(self.n):
        # Skip controllers not yet due or already enabled.
        if t < self.tl[idx] or self.en[idx] != 0:
            continue
        self.en[idx] = 1
        logger.info(
            'Extended ACE <{}> activated at t = {}.'.format(
                self.idx[idx], t))
|
Switch if time for eAgc has come
|
def get_object(model, meteor_id, *args, **kwargs):
    """Return the model instance identified by the given meteor_id.

    Resolution order: the primary key itself when it is an AleaIdField,
    then a single unique non-null AleaIdField, then a lookup through
    get_object_id().
    """
    # Django model._meta is now public API -> pylint: disable=W0212
    meta = model._meta
    queryset = model.objects.filter(*args, **kwargs)
    if isinstance(meta.pk, AleaIdField):
        # meteor_id is the primary key itself.
        return queryset.get(pk=meteor_id)
    candidates = [
        field
        for field in meta.local_fields
        if isinstance(field, AleaIdField) and field.unique and not field.null
    ]
    if len(candidates) == 1:
        return queryset.get(**{candidates[0].name: meteor_id})
    return queryset.get(pk=get_object_id(model, meteor_id))
|
Return an object for the given meteor_id.
|
def _send_request(self, path, data, method):
    """
    Uses the HTTP transport to query the Route53 API. Runs the response
    through lxml's parser, before we hand it off for further picking
    apart by our call-specific parsers.
    :param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
    :param data: The params to send along with the request.
    :type data: Either a dict or bytes, depending on the request type.
    :param str method: One of 'GET', 'POST', or 'DELETE'.
    :rtype: lxml.etree._Element
    :returns: An lxml Element root.
    """
    raw_body = self._transport.send_request(path, data, method)
    # Uncomment to inspect the raw parsed response while debugging:
    # print(prettyprint_xml(root))
    return etree.fromstring(raw_body)
|
Uses the HTTP transport to query the Route53 API. Runs the response
through lxml's parser, before we hand it off for further picking
apart by our call-specific parsers.
:param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
:param data: The params to send along with the request.
:type data: Either a dict or bytes, depending on the request type.
:param str method: One of 'GET', 'POST', or 'DELETE'.
:rtype: lxml.etree._Element
:returns: An lxml Element root.
|
def add_tags(self):
    """Add a Vorbis comment block to the file."""
    # Refuse to overwrite an existing comment block.
    if self.tags is not None:
        raise FLACVorbisError("a Vorbis comment already exists")
    self.tags = VCFLACDict()
    self.metadata_blocks.append(self.tags)
|
Add a Vorbis comment block to the file.
|
def filter(objects, Type=None, min=-1, max=-1): #PYCHOK muppy filter
    """Filter objects.
    The filter can be by type, minimum size, and/or maximum size.

    Keyword arguments:
    Type -- object type to filter by; None keeps all types
    min -- minimum object size (inclusive); -1 disables this bound
    max -- maximum object size (inclusive); -1 disables this bound

    Raises:
    ValueError -- if both bounds are given and min exceeds max.
    """
    # Only compare the bounds when both were actually supplied; previously
    # setting `min` alone raised because it was compared against max=-1.
    if min > -1 and max > -1 and min > max:
        raise ValueError("minimum must be smaller than maximum")
    # With no type filter, start from all objects instead of an empty list,
    # so that size-only filtering still works.
    res = [o for o in objects if Type is None or isinstance(o, Type)]
    if min > -1:
        # Keep objects at least `min` bytes (previously the comparison was
        # inverted and kept objects *smaller* than the minimum).
        res = [o for o in res if _getsizeof(o) >= min]
    if max > -1:
        # Keep objects at most `max` bytes (previously inverted as well).
        res = [o for o in res if _getsizeof(o) <= max]
    return res
|
Filter objects.
The filter can be by type, minimum size, and/or maximum size.
Keyword arguments:
Type -- object type to filter by
min -- minimum object size
max -- maximum object size
|
async def query_presence(self, query_presence_request):
    """Return presence status for a list of users.

    Sends a ``presence/querypresence`` request and returns the populated
    response message.
    """
    result = hangouts_pb2.QueryPresenceResponse()
    await self._pb_request(
        'presence/querypresence', query_presence_request, result)
    return result
|
Return presence status for a list of users.
|
def HashFilePath(self, path, byte_count):
    """Updates underlying hashers with file on a given path.

    Args:
      path: A path to the file that is going to be fed to the hashers.
      byte_count: A maximum numbers of bytes that are going to be processed.
    """
    fd = open(path, "rb")
    try:
        self.HashFile(fd, byte_count)
    finally:
        # Ensure the descriptor is released even if hashing fails.
        fd.close()
|
Updates underlying hashers with file on a given path.
Args:
path: A path to the file that is going to be fed to the hashers.
byte_count: A maximum numbers of bytes that are going to be processed.
|
def queue_resize(self):
    """Request that this element re-check its child sprite sizes."""
    self._children_resize_queued = True
    parent = getattr(self, "parent", None)
    if not parent:
        return
    # Bubble the resize request up through sprite ancestors that support it.
    if isinstance(parent, graphics.Sprite) and hasattr(parent, "queue_resize"):
        parent.queue_resize()
|
request the element to re-check its child sprite sizes
|
def resubmit(self, job_ids = None, also_success = False, running_jobs = False, new_command=None, keep_logs=False, **kwargs):
    """Re-submit jobs automatically.

    job_ids -- optional list of job ids to restrict to; None means all jobs.
    also_success -- when True, also re-submit jobs that finished successfully.
    running_jobs -- when True, re-submit regardless of current job status.
    new_command -- replacement command line; only applied when exactly one
        job id was selected.
    keep_logs -- when False, delete the old log files before re-submitting.
    """
    # Serialize access to the job database for the whole operation.
    self.lock()
    # iterate over all jobs
    jobs = self.get_jobs(job_ids)
    if new_command is not None:
        if len(jobs) == 1:
            jobs[0].set_command_line(new_command)
        else:
            # Ambiguous target: refuse to rewrite multiple jobs' commands.
            logger.warn("Ignoring new command since no single job id was specified")
    accepted_old_status = ('submitted', 'success', 'failure') if also_success else ('submitted', 'failure',)
    for job in jobs:
        # check if this job needs re-submission
        if running_jobs or job.status in accepted_old_status:
            # A job still executing on the grid cannot be re-submitted locally.
            if job.queue_name != 'local' and job.status == 'executing':
                logger.error("Cannot re-submit job '%s' locally since it is still running in the grid. Use 'jman stop' to stop it\'s execution!", job)
            else:
                # re-submit job to the grid
                logger.info("Re-submitted job '%s' to the database", job)
                if not keep_logs:
                    self.delete_logs(job)
                job.submit('local')
    # Persist all status changes before releasing the lock.
    self.session.commit()
    self.unlock()
|
Re-submit jobs automatically
|
def hdr(data, filename):
    """
    write ENVI header files
    Parameters
    ----------
    data: str or dict
        the file or dictionary to get the info from
    filename: str
        the HDR file to write
    Returns
    -------
    """
    # Wrap the input in an HDRobject unless it already is one.
    if isinstance(data, HDRobject):
        header = data
    else:
        header = HDRobject(data)
    header.write(filename)
|
write ENVI header files
Parameters
----------
data: str or dict
the file or dictionary to get the info from
filename: str
the HDR file to write
Returns
-------
|
def init(self, xml, port, server=None,
         server2=None, port2=None,
         role=0, exp_uid=None, episode=0,
         action_filter=None, resync=0, step_options=0, action_space=None):
    """Initialize a Malmo environment.
    xml - the mission xml.
    port - the MalmoEnv service's port.
    server - the MalmoEnv service address. Default is localhost.
    server2 - the MalmoEnv service address for given role if not 0.
    port2 - the MalmoEnv service port for given role if not 0.
    role - the agent role (0..N-1) for missions with N agents. Defaults to 0.
    exp_uid - the experiment's unique identifier. Generated if not given.
    episode - the "reset" start count for experiment re-starts. Defaults to 0.
    action_filter - an optional list of valid actions to filter by. Defaults to simple commands.
    step_options - encodes withTurnKey and withInfo in step messages. Defaults to info included,
    turn if required.
    """
    if action_filter is None:
        action_filter = {"move", "turn", "use", "attack"}
    if not xml.startswith('<Mission'):
        # str.find returns -1 when the tag is absent; the previous
        # str.index call raised ValueError instead, which made the
        # error check below unreachable.
        i = xml.find("<Mission")
        if i == -1:
            raise EnvException("Mission xml must contain <Mission> tag.")
        xml = xml[i:]
    self.xml = etree.fromstring(xml)
    self.role = role
    if exp_uid is None:
        self.exp_uid = str(uuid.uuid4())
    else:
        self.exp_uid = exp_uid
    command_parser = CommandParser(action_filter)
    commands = command_parser.get_commands_from_xml(self.xml, self.role)
    actions = command_parser.get_actions(commands)
    # print("role " + str(self.role) + " actions " + str(actions)
    if action_space:
        self.action_space = action_space
    else:
        self.action_space = ActionSpace(actions)
    self.port = port
    if server is not None:
        self.server = server
    if server2 is not None:
        self.server2 = server2
    else:
        self.server2 = self.server
    if port2 is not None:
        self.port2 = port2
    else:
        # By convention each extra role connects one port higher.
        self.port2 = self.port + self.role
    self.agent_count = len(self.xml.findall(self.ns + 'AgentSection'))
    turn_based = self.xml.find('.//' + self.ns + 'TurnBasedCommands') is not None
    if turn_based:
        self.turn_key = 'AKWozEre'
    else:
        self.turn_key = ""
    # NOTE(review): the parameter default is 0, so this None branch only
    # runs when a caller passes step_options=None explicitly -- confirm
    # the intended default.
    if step_options is None:
        self.step_options = 0 if not turn_based else 2
    else:
        self.step_options = step_options
    self.done = True
    # print("agent count " + str(self.agent_count) + " turn based " + turn_based)
    self.resync_period = resync
    self.resets = episode
    e = etree.fromstring("""<MissionInit xmlns="http://ProjectMalmo.microsoft.com"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
   SchemaVersion="" PlatformVersion=""" + '\"' + malmo_version + '\"' +
                         """>
   <ExperimentUID></ExperimentUID>
   <ClientRole>0</ClientRole>
   <ClientAgentConnection>
   <ClientIPAddress>127.0.0.1</ClientIPAddress>
   <ClientMissionControlPort>0</ClientMissionControlPort>
   <ClientCommandsPort>0</ClientCommandsPort>
   <AgentIPAddress>127.0.0.1</AgentIPAddress>
   <AgentMissionControlPort>0</AgentMissionControlPort>
   <AgentVideoPort>0</AgentVideoPort>
   <AgentDepthPort>0</AgentDepthPort>
   <AgentLuminancePort>0</AgentLuminancePort>
   <AgentObservationsPort>0</AgentObservationsPort>
   <AgentRewardsPort>0</AgentRewardsPort>
   <AgentColourMapPort>0</AgentColourMapPort>
   </ClientAgentConnection>
   </MissionInit>""")
    e.insert(0, self.xml)
    self.xml = e
    self.xml.find(self.ns + 'ClientRole').text = str(self.role)
    self.xml.find(self.ns + 'ExperimentUID').text = self.exp_uid
    if self.role != 0 and self.agent_count > 1:
        e = etree.Element(self.ns + 'MinecraftServerConnection',
                          attrib={'address': self.server,
                                  'port': str(0)
                                  })
        self.xml.insert(2, e)
    video_producers = self.xml.findall('.//' + self.ns + 'VideoProducer')
    assert len(video_producers) == self.agent_count
    video_producer = video_producers[self.role]
    self.width = int(video_producer.find(self.ns + 'Width').text)
    self.height = int(video_producer.find(self.ns + 'Height').text)
    # .get returns None when the attribute is absent; the previous
    # attrib["want_depth"] lookup raised KeyError, which made the
    # `is not None` check below dead code.
    want_depth = video_producer.attrib.get("want_depth")
    self.depth = 4 if want_depth is not None and (want_depth == "true" or want_depth == "1") else 3
    # print(str(self.width) + "x" + str(self.height) + "x" + str(self.depth))
    self.observation_space = VisualObservationSpace(self.width, self.height, self.depth)
|
Initialize a Malmo environment.
xml - the mission xml.
port - the MalmoEnv service's port.
server - the MalmoEnv service address. Default is localhost.
server2 - the MalmoEnv service address for given role if not 0.
port2 - the MalmoEnv service port for given role if not 0.
role - the agent role (0..N-1) for missions with N agents. Defaults to 0.
exp_uid - the experiment's unique identifier. Generated if not given.
episode - the "reset" start count for experiment re-starts. Defaults to 0.
action_filter - an optional list of valid actions to filter by. Defaults to simple commands.
step_options - encodes withTurnKey and withInfo in step messages. Defaults to info included,
turn if required.
|
def coupling_to_arc(coupling_map: List[List[int]]) -> Architecture:
    """
    Produces a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Architecture` corresponding to a (directed) coupling map,
    stating the pairs of qubits between which two-qubit interactions
    (e.g. CXs) can be applied.
    :param coupling_map: Pairs of indices where each pair [control, target]
        permits the use of CXs between them
    :return: The :math:`\\mathrm{t|ket}\\rangle` :py:class:`Architecture` capturing the behaviour of the coupling map
    """
    # The qiskit CouplingMap is only used to count the qubits involved.
    qiskit_coupling = CouplingMap(couplinglist=coupling_map)
    return DirectedGraph(coupling_map, qiskit_coupling.size())
|
Produces a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Architecture` corresponding to a (directed) coupling map,
stating the pairs of qubits between which two-qubit interactions
(e.g. CXs) can be applied.
:param coupling_map: Pairs of indices where each pair [control, target]
permits the use of CXs between them
:return: The :math:`\\mathrm{t|ket}\\rangle` :py:class:`Architecture` capturing the behaviour of the coupling map
|
def _get_stddevs(self, C, rup, shape, stddev_types):
    """
    Return standard deviations as defined in p. 971.
    """
    weight = self._compute_weight_std(C, rup.mag)
    # Broadcast the scalar intra/inter-event sigmas over the output shape.
    intra = weight * C["sd1"] * np.ones(shape)
    inter = weight * C["sd2"] * np.ones(shape)
    result = []
    for requested in stddev_types:
        assert requested in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if requested == const.StdDev.TOTAL:
            # Total sigma combines the two components in quadrature.
            result.append(np.sqrt(intra ** 2. + inter ** 2.))
        elif requested == const.StdDev.INTRA_EVENT:
            result.append(intra)
        elif requested == const.StdDev.INTER_EVENT:
            result.append(inter)
    return result
|
Return standard deviations as defined in p. 971.
|
def predict_mu(self, X):
    """
    Predict the expected value of the target given the model and input X.

    Parameters
    ---------
    X : array-like of shape (n_samples, m_features),
        containing the input dataset

    Returns
    -------
    y : np.array of shape (n_samples,)
        containing expected values under the model
    """
    if not self._is_fitted:
        raise AttributeError('GAM has not been fitted. Call fit first.')
    # Validate the input against the statistics gathered during fitting.
    X = check_X(X, n_feats=self.statistics_['m_features'],
                edge_knots=self.edge_knots_, dtypes=self.dtype,
                features=self.feature, verbose=self.verbose)
    # Map the linear predictor through the link's inverse to get the mean.
    return self.link.mu(self._linear_predictor(X), self.distribution)
|
predict expected value of target given model and input X
Parameters
---------
X : array-like of shape (n_samples, m_features),
containing the input dataset
Returns
-------
y : np.array of shape (n_samples,)
containing expected values under the model
|
def save_auxiliary_files(self, layer, destination):
    """Save auxiliary files when using the 'save as' function.

    If some auxiliary files (.xml, .json) exist, this function will
    copy them when the 'save as' function is used on the layer.

    :param layer: The layer which has been saved as.
    :type layer: QgsMapLayer

    :param destination: The new filename of the layer.
    :type destination: str
    """
    enable_busy_cursor()
    try:
        # Basenames do not change per extension; compute them once
        # instead of on every loop iteration.
        source_basename = os.path.splitext(layer.source())[0]
        destination_basename = os.path.splitext(destination)[0]
        for extension in ('xml', 'json'):
            source_file = "%s.%s" % (source_basename, extension)
            destination_file = "%s.%s" % (destination_basename, extension)
            # noinspection PyBroadException,PyBroadException
            try:
                if os.path.isfile(source_file):
                    shutil.copy(source_file, destination_file)
            except (OSError, IOError):
                display_critical_message_bar(
                    title=self.tr('Error while saving'),
                    message=self.tr(
                        'The destination location must be writable.'),
                    iface_object=self.iface
                )
            except Exception:  # pylint: disable=broad-except
                display_critical_message_bar(
                    title=self.tr('Error while saving'),
                    message=self.tr('Something went wrong.'),
                    iface_object=self.iface
                )
    finally:
        # Always restore the cursor, even if an unexpected error escapes
        # (previously a failure before the loop left the busy cursor on).
        disable_busy_cursor()
|
Save auxiliary files when using the 'save as' function.
If some auxiliary files (.xml, .json) exist, this function will
copy them when the 'save as' function is used on the layer.
:param layer: The layer which has been saved as.
:type layer: QgsMapLayer
:param destination: The new filename of the layer.
:type destination: str
|
def nnz_obs(self):
    """ get the number of non-zero weighted observations

    Returns
    -------
    nnz_obs : int
        the number of non-zeros weighted observations
    """
    # An observation counts as non-zero when its weight is strictly positive.
    return sum(1 for w in self.observation_data.weight if w > 0.0)
|
get the number of non-zero weighted observations
Returns
-------
nnz_obs : int
the number of non-zeros weighted observations
|
def parallel_evaluation_mp(candidates, args):
    """Evaluate the candidates in parallel using ``multiprocessing``.

    This function allows parallel evaluation of candidate solutions.
    It uses the standard multiprocessing library to accomplish the
    parallelization. The function assigns the evaluation of each
    candidate to its own job, all of which are then distributed to the
    available processing units.

    .. note::
       All arguments to the evaluation function must be pickleable.
       Those that are not will not be sent through the ``args`` variable
       and will be unavailable to your function.

    .. Arguments:
       candidates -- the candidate solutions
       args -- a dictionary of keyword arguments

    Required keyword arguments in args:
    - *mp_evaluator* -- actual evaluation function to be used (This function
      should have the same signature as any other inspyred evaluation function.)

    Optional keyword arguments in args:
    - *mp_nprocs* -- number of processors that will be used (default machine
      cpu count)
    """
    import time
    import multiprocessing
    logger = args['_ec'].logger
    try:
        evaluator = args['mp_evaluator']
    except KeyError:
        logger.error('parallel_evaluation_mp requires \'mp_evaluator\' be defined in the keyword arguments list')
        raise
    try:
        nprocs = args['mp_nprocs']
    except KeyError:
        nprocs = multiprocessing.cpu_count()
    # Worker processes can only receive pickleable arguments; drop the rest.
    pickled_args = {}
    for key in args:
        try:
            pickle.dumps(args[key])
        except (TypeError, pickle.PickleError, pickle.PicklingError):
            logger.debug('unable to pickle args parameter {0} in parallel_evaluation_mp'.format(key))
        else:
            pickled_args[key] = args[key]
    start = time.time()
    try:
        pool = multiprocessing.Pool(processes=nprocs)
        results = [pool.apply_async(evaluator, ([c], pickled_args)) for c in candidates]
        pool.close()
        pool.join()
    except (OSError, RuntimeError) as e:
        logger.error('failed parallel_evaluation_mp: {0}'.format(str(e)))
        raise
    else:
        # Previously the return statement sat inside the try block, so this
        # timing/debug code in the else clause was unreachable dead code.
        end = time.time()
        logger.debug('completed parallel_evaluation_mp in {0} seconds'.format(end - start))
        return [r.get()[0] for r in results]
|
Evaluate the candidates in parallel using ``multiprocessing``.
This function allows parallel evaluation of candidate solutions.
It uses the standard multiprocessing library to accomplish the
parallelization. The function assigns the evaluation of each
candidate to its own job, all of which are then distributed to the
available processing units.
.. note::
All arguments to the evaluation function must be pickleable.
Those that are not will not be sent through the ``args`` variable
and will be unavailable to your function.
.. Arguments:
candidates -- the candidate solutions
args -- a dictionary of keyword arguments
Required keyword arguments in args:
- *mp_evaluator* -- actual evaluation function to be used (This function
should have the same signature as any other inspyred evaluation function.)
Optional keyword arguments in args:
- *mp_nprocs* -- number of processors that will be used (default machine
cpu count)
|
def batch_filter(self, zs, Fs=None, Qs=None, Hs=None,
                 Rs=None, Bs=None, us=None, update_first=False,
                 saver=None):
    """ Batch processes a sequences of measurements.

    Parameters
    ----------
    zs : list-like
        list of measurements at each time step `self.dt`. Missing
        measurements must be represented by `None`.

    Fs : None, list-like, default=None
        optional value or list of values to use for the state transition
        matrix F.
        If Fs is None then self.F is used for all epochs.
        Otherwise it must contain a list-like list of F's, one for
        each epoch.  This allows you to have varying F per epoch.

    Qs : None, np.array or list-like, default=None
        optional value or list of values to use for the process error
        covariance Q.
        If Qs is None then self.Q is used for all epochs.
        Otherwise it must contain a list-like list of Q's, one for
        each epoch.  This allows you to have varying Q per epoch.

    Hs : None, np.array or list-like, default=None
        optional list of values to use for the measurement matrix H.
        If Hs is None then self.H is used for all epochs.
        If Hs contains a single matrix, then it is used as H for all
        epochs.
        Otherwise it must contain a list-like list of H's, one for
        each epoch.  This allows you to have varying H per epoch.

    Rs : None, np.array or list-like, default=None
        optional list of values to use for the measurement error
        covariance R.
        If Rs is None then self.R is used for all epochs.
        Otherwise it must contain a list-like list of R's, one for
        each epoch.  This allows you to have varying R per epoch.

    Bs : None, np.array or list-like, default=None
        optional list of values to use for the control transition matrix B.
        If Bs is None then self.B is used for all epochs.
        Otherwise it must contain a list-like list of B's, one for
        each epoch.  This allows you to have varying B per epoch.

    us : None, np.array or list-like, default=None
        optional list of values to use for the control input vector;
        If us is None then 0 is used for all epochs (equivalent to no
        control input).
        Otherwise it must contain a list-like list of u's, one for
        each epoch.

    update_first : bool, optional, default=False
        controls whether the order of operations is update followed by
        predict, or predict followed by update. Default is predict->update.

    saver : filterpy.common.Saver, optional
        filterpy.common.Saver object. If provided, saver.save() will be
        called after every epoch

    Returns
    -------
    means : np.array((n,dim_x,1))
        array of the state for each time step after the update. Each entry
        is an np.array. In other words `means[k,:]` is the state at step
        `k`.

    covariance : np.array((n,dim_x,dim_x))
        array of the covariances for each time step after the update.
        In other words `covariance[k,:,:]` is the covariance at step `k`.

    means_predictions : np.array((n,dim_x,1))
        array of the state for each time step after the predictions. Each
        entry is an np.array. In other words `means[k,:]` is the state at
        step `k`.

    covariance_predictions : np.array((n,dim_x,dim_x))
        array of the covariances for each time step after the prediction.
        In other words `covariance[k,:,:]` is the covariance at step `k`.

    Examples
    --------
    .. code-block:: Python

        # this example demonstrates tracking a measurement where the time
        # between measurement varies, as stored in dts. This requires
        # that F be recomputed for each epoch. The output is then smoothed
        # with an RTS smoother.

        zs = [t + random.randn()*4 for t in range(40)]
        Fs = [np.array([[1., dt], [0, 1]]) for dt in dts]

        (mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs)
        (xs, Ps, Ks) = kf.rts_smoother(mu, cov, Fs=Fs)
    """
    #pylint: disable=too-many-statements
    n = np.size(zs, 0)
    # Fall back to the filter's own matrices for any per-epoch list
    # that was not supplied.
    if Fs is None:
        Fs = [self.F] * n
    if Qs is None:
        Qs = [self.Q] * n
    if Hs is None:
        Hs = [self.H] * n
    if Rs is None:
        Rs = [self.R] * n
    if Bs is None:
        Bs = [self.B] * n
    if us is None:
        us = [0] * n

    # mean estimates from Kalman Filter
    # Output shapes mirror self.x: 1-D state gives (n, dim_x), column
    # vector state gives (n, dim_x, 1).
    if self.x.ndim == 1:
        means = zeros((n, self.dim_x))
        means_p = zeros((n, self.dim_x))
    else:
        means = zeros((n, self.dim_x, 1))
        means_p = zeros((n, self.dim_x, 1))

    # state covariances from Kalman Filter
    covariances = zeros((n, self.dim_x, self.dim_x))
    covariances_p = zeros((n, self.dim_x, self.dim_x))

    if update_first:
        # update -> predict ordering for each epoch
        for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):

            self.update(z, R=R, H=H)
            means[i, :] = self.x
            covariances[i, :, :] = self.P

            self.predict(u=u, B=B, F=F, Q=Q)
            means_p[i, :] = self.x
            covariances_p[i, :, :] = self.P

            if saver is not None:
                saver.save()
    else:
        # predict -> update ordering (the default) for each epoch
        for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):

            self.predict(u=u, B=B, F=F, Q=Q)
            means_p[i, :] = self.x
            covariances_p[i, :, :] = self.P

            self.update(z, R=R, H=H)
            means[i, :] = self.x
            covariances[i, :, :] = self.P

            if saver is not None:
                saver.save()

    return (means, covariances, means_p, covariances_p)
|
Batch processes a sequences of measurements.
Parameters
----------
zs : list-like
list of measurements at each time step `self.dt`. Missing
measurements must be represented by `None`.
Fs : None, list-like, default=None
optional value or list of values to use for the state transition
matrix F.
If Fs is None then self.F is used for all epochs.
Otherwise it must contain a list-like list of F's, one for
each epoch. This allows you to have varying F per epoch.
Qs : None, np.array or list-like, default=None
optional value or list of values to use for the process error
covariance Q.
If Qs is None then self.Q is used for all epochs.
Otherwise it must contain a list-like list of Q's, one for
each epoch. This allows you to have varying Q per epoch.
Hs : None, np.array or list-like, default=None
optional list of values to use for the measurement matrix H.
If Hs is None then self.H is used for all epochs.
If Hs contains a single matrix, then it is used as H for all
epochs.
Otherwise it must contain a list-like list of H's, one for
each epoch. This allows you to have varying H per epoch.
Rs : None, np.array or list-like, default=None
optional list of values to use for the measurement error
covariance R.
If Rs is None then self.R is used for all epochs.
Otherwise it must contain a list-like list of R's, one for
each epoch. This allows you to have varying R per epoch.
Bs : None, np.array or list-like, default=None
optional list of values to use for the control transition matrix B.
If Bs is None then self.B is used for all epochs.
Otherwise it must contain a list-like list of B's, one for
each epoch. This allows you to have varying B per epoch.
us : None, np.array or list-like, default=None
optional list of values to use for the control input vector;
If us is None then None is used for all epochs (equivalent to 0,
or no control input).
Otherwise it must contain a list-like list of u's, one for
each epoch.
update_first : bool, optional, default=False
controls whether the order of operations is update followed by
predict, or predict followed by update. Default is predict->update.
saver : filterpy.common.Saver, optional
filterpy.common.Saver object. If provided, saver.save() will be
called after every epoch
Returns
-------
means : np.array((n,dim_x,1))
array of the state for each time step after the update. Each entry
is an np.array. In other words `means[k,:]` is the state at step
`k`.
covariance : np.array((n,dim_x,dim_x))
array of the covariances for each time step after the update.
In other words `covariance[k,:,:]` is the covariance at step `k`.
means_predictions : np.array((n,dim_x,1))
array of the state for each time step after the predictions. Each
entry is an np.array. In other words `means[k,:]` is the state at
step `k`.
covariance_predictions : np.array((n,dim_x,dim_x))
array of the covariances for each time step after the prediction.
In other words `covariance[k,:,:]` is the covariance at step `k`.
Examples
--------
.. code-block:: Python
# this example demonstrates tracking a measurement where the time
# between measurement varies, as stored in dts. This requires
# that F be recomputed for each epoch. The output is then smoothed
# with an RTS smoother.
zs = [t + random.randn()*4 for t in range (40)]
Fs = [np.array([[1., dt], [0, 1]] for dt in dts]
(mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs)
(xs, Ps, Ks) = kf.rts_smoother(mu, cov, Fs=Fs)
|
def getSiblings(self, textId, subreference):
    """ Retrieve the siblings of a textual node

    :param textId: CtsTextMetadata Identifier
    :param subreference: CapitainsCtsPassage Reference
    :return: GetPrevNextUrn request response from the endpoint
    """
    # Combine text identifier and passage reference into a full URN.
    urn = "{}:{}".format(textId, subreference)
    return self.getPrevNextUrn(urn=urn)
|
Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:param reference: CapitainsCtsPassage Reference
:return: GetPrevNextUrn request response from the endpoint
|
def acknowledge_host_problem(self, host, sticky, notify, author, comment):
    """Acknowledge a host problem

    Format of the line that triggers function call::

    ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent:obsolete>;<author>;
    <comment>

    :param host: host to acknowledge the problem
    :type host: alignak.objects.host.Host
    :param sticky: if sticky == 2, the acknowledge will remain until the host returns to an
    UP state else the acknowledge will be removed as soon as the host state changes
    :type sticky: integer
    :param notify: if to 1, send a notification
    :type notify: integer
    :param author: name of the author or the acknowledge
    :type author: str
    :param comment: comment (description) of the acknowledge
    :type comment: str
    :return: None
    TODO: add a better ACK management
    """
    # Resolve the host's notification period, when one is configured.
    period_uuid = getattr(host, 'notification_period', None)
    if period_uuid is not None:
        notification_period = self.daemon.timeperiods[period_uuid]
    else:
        notification_period = None
    host.acknowledge_problem(notification_period, self.hosts, self.services,
                             sticky, notify, author, comment)
|
Acknowledge a host problem
Format of the line that triggers function call::
ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent:obsolete>;<author>;
<comment>
:param host: host to acknowledge the problem
:type host: alignak.objects.host.Host
:param sticky: if sticky == 2, the acknowledge will remain until the host returns to an
UP state else the acknowledge will be removed as soon as the host state changes
:type sticky: integer
:param notify: if to 1, send a notification
:type notify: integer
:param author: name of the author or the acknowledge
:type author: str
:param comment: comment (description) of the acknowledge
:type comment: str
:return: None
TODO: add a better ACK management
|
def create_url(urlbase, urlargd, escape_urlargd=True, urlhash=None):
    """Creates a W3C compliant URL. Output will look like this:
    'urlbase?param1=value1&param2=value2'

    @param urlbase: base url (e.g. config.CFG_SITE_URL/search)
    @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'}
    @param escape_urlargd: boolean indicating if the function should escape
        arguments (e.g. < becomes &lt; or " becomes &quot;)
    @param urlhash: hash string to add at the end of the link
    """
    output = urlbase
    if urlargd:
        if escape_urlargd:
            # URL-quote, then HTML-escape both key and value.
            pairs = [escape(quote(str(k)), quote=True) + '=' +
                     escape(quote(str(v)), quote=True)
                     for k, v in urlargd.items()]
        else:
            pairs = [str(k) + '=' + str(v) for k, v in urlargd.items()]
        output += '?' + '&'.join(pairs)
    if urlhash:
        output += "#" + escape(quote(str(urlhash)))
    return output
|
Creates a W3C compliant URL. Output will look like this:
'urlbase?param1=value1&param2=value2'
@param urlbase: base url (e.g. config.CFG_SITE_URL/search)
@param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'}
@param escape_urlargd: boolean indicating if the function should escape
arguments (e.g. < becomes < or " becomes ")
@param urlhash: hash string to add at the end of the link
|
def is_python_binding_installed_on_pip(self):
    """Check whether the Python binding has already been installed via pip."""
    pip_version = self._get_pip_version()
    Log.debug('Pip version: {0}'.format(pip_version))
    pip_major_version = int(pip_version.split('.')[0])
    # --format is from pip v9.0.0
    # https://pip.pypa.io/en/stable/news/
    if pip_major_version >= 9:
        for package in self._get_pip_list_json_obj():
            Log.debug('pip list: {0}'.format(package))
            if package['name'] in ('rpm-python', 'rpm'):
                Log.debug('Package installed: {0}, {1}'.format(
                    package['name'], package['version']))
                return True
    else:
        # Implementation for pip old version.
        # It will be removed in the future.
        for line in self._get_pip_list_lines():
            if re.match('^rpm(-python)? ', line):
                Log.debug('Package installed.')
                return True
    return False
|
Check if the Python binding has already been installed.
|
def assertFileSizeLess(self, filename, size, msg=None):
    '''Fail if ``filename``'s size is not less than ``size`` as
    determined by the '<' operator.

    Parameters
    ----------
    filename : str, bytes, file-like
    size : int, float
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.

    Raises
    ------
    TypeError
        If ``filename`` is not a str or bytes object and is not
        file-like.
    '''
    # Delegate the comparison to the standard assertLess machinery.
    self.assertLess(self._get_file_size(filename), size, msg=msg)
|
Fail if ``filename``'s size is not less than ``size`` as
determined by the '<' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
|
def _printf(self, *args, **kwargs):
    '''Print to the configured stream, unless the caller already supplied
    an explicit ``file`` argument for this specific call.
    '''
    stream = self._stream
    # A falsy `file` value (or no `file` at all) is overridden by the
    # configured stream, when one is set.
    if stream and not kwargs.get('file'):
        kwargs['file'] = stream
    _printf(*args, **kwargs)
|
Print to configured stream if any is specified and the file argument
is not already set for this specific call.
|
def _get_deploy_image_params(data_holder, host_info, vm_name):
    """
    Build OVF image deployment parameters from the resource model.

    :type data_holder: models.vCenterVMFromImageResourceModel.vCenterVMFromImageResourceModel
    """
    params = OvfImageParams()
    # Optional attributes: only set them when present and non-empty.
    user_args = getattr(data_holder, 'vcenter_image_arguments', None)
    if user_args:
        params.user_arguments = user_args
    vm_location = getattr(data_holder, 'vm_location', None)
    if vm_location:
        # Strip the leading "<datacenter>/" prefix from the folder path.
        params.vm_folder = vm_location.replace(
            data_holder.default_datacenter + '/', '')
    params.cluster = data_holder.vm_cluster
    params.resource_pool = data_holder.vm_resource_pool
    params.connectivity = host_info
    params.vm_name = vm_name
    params.datastore = data_holder.vm_storage
    params.datacenter = data_holder.default_datacenter
    params.image_url = data_holder.vcenter_image
    params.power_on = False
    params.vcenter_name = data_holder.vcenter_name
    return params
|
:type data_holder: models.vCenterVMFromImageResourceModel.vCenterVMFromImageResourceModel
|
def _asciify_dict(data):
    """ Ascii-fies dict keys and values """
    # NOTE: Python 2 code (iteritems / unicode).
    result = {}
    for key, value in data.iteritems():
        if isinstance(key, unicode):
            key = _remove_accents(key).encode('utf-8')
        # # note new if
        if isinstance(value, unicode):
            value = _remove_accents(value).encode('utf-8')
        elif isinstance(value, list):
            value = _asciify_list(value)
        elif isinstance(value, dict):
            # Recurse into nested dictionaries.
            value = _asciify_dict(value)
        result[key] = value
    return result
|
Ascii-fies dict keys and values
|
def do_ranges_intersect(begin, end, old_begin, old_end):
    """
    Determine if the two given memory address ranges intersect.
    @type  begin: int
    @param begin: Start address of the first range.
    @type  end: int
    @param end: End address of the first range.
    @type  old_begin: int
    @param old_begin: Start address of the second range.
    @type  old_end: int
    @param old_end: End address of the second range.
    @rtype:  bool
    @return: C{True} if the two ranges intersect, C{False} otherwise.
    """
    # Either range containing one of the other's endpoints means overlap.
    # (Both directions are checked so zero-length ranges behave the same
    # way as in the original four-clause formulation.)
    old_contains_first = (old_begin <= begin < old_end) or \
                         (old_begin < end <= old_end)
    first_contains_old = (begin <= old_begin < end) or \
                         (begin < old_end <= end)
    return old_contains_first or first_contains_old
|
Determine if the two given memory address ranges intersect.
@type begin: int
@param begin: Start address of the first range.
@type end: int
@param end: End address of the first range.
@type old_begin: int
@param old_begin: Start address of the second range.
@type old_end: int
@param old_end: End address of the second range.
@rtype: bool
@return: C{True} if the two ranges intersect, C{False} otherwise.
|
def merge_and_fit(self, segment):
    """Absorb another segment's points into this segment.

    The combined point list is reordered with a distance heuristic so
    that the merged track stays spatially coherent.

    Args:
        segment (:obj:`Segment`): Segment whose points are merged in
    Returns:
        :obj:`Segment`: self, allowing call chaining
    """
    ordered = sort_segment_points(self.points, segment.points)
    self.points = ordered
    return self
|
Merges another segment with this one, ordering the points based on a
distance heuristic
Args:
segment (:obj:`Segment`): Segment to merge with
Returns:
:obj:`Segment`: self
|
def fill_subparser(subparser):
    """Sets up a subparser to convert YouTube audio files.
    Adds the compulsory `--youtube-id` flag as well as the optional
    `sample` and `channels` flags.
    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `youtube_audio` command.
    Returns
    -------
    callable
        The `convert_youtube_audio` function, used as the command handler.
    """
    subparser.add_argument(
        '--youtube-id', type=str, required=True,
        help=("The YouTube ID of the video from which to extract audio, "
              "usually an 11-character string.")
    )
    subparser.add_argument(
        '--channels', type=int, default=1,
        # BUG FIX: the two literals previously joined as "1means" --
        # a trailing space keeps the help text readable.
        help=("The number of audio channels to convert to. The default of 1 "
              "means audio is converted to mono.")
    )
    subparser.add_argument(
        '--sample', type=int, default=16000,
        help=("The sampling rate in Hz. The default of 16000 is "
              "significantly downsampled compared to normal WAVE files; "
              "pass 44100 for the usual sampling rate.")
    )
    return convert_youtube_audio
|
Sets up a subparser to convert YouTube audio files.
Adds the compulsory `--youtube-id` flag as well as the optional
`sample` and `channels` flags.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
|
def open(self, name, flags, preferred_fd=None):
    """
    Open a symbolic file. Basically open(2).
    :param name: Path of the symbolic file, as a string or bytes.
    :type name: string or bytes
    :param flags: File operation flags, a bitfield of constants from open(2), as an AST
    :param preferred_fd: Assign this fd if it's not already claimed.
    :return: The file descriptor number allocated (maps through posix.get_fd to a SimFileDescriptor)
             or None if the open fails.
    ``mode`` from open(2) is unsupported at present.
    """
    # An empty path can never be opened.
    if len(name) == 0:
        return None
    # The symbolic filesystem is keyed on bytes; normalize str paths.
    if type(name) is str:
        name = name.encode()
    # FIXME: HACK
    if self.uid != 0 and name.startswith(b'/var/run'):
        return None
    # TODO: speed this up (editor's note: ...really? this is fine)
    fd = None
    # Honor the caller's requested descriptor number when it is free;
    # otherwise allocate the next available one.
    if preferred_fd is not None and preferred_fd not in self.fd:
        fd = preferred_fd
    else:
        fd = self._pick_fd()
    # Concretize the (possibly symbolic) flags to decide the access mode.
    flags = self.state.solver.eval(flags)
    writing = (flags & Flags.O_ACCMODE) in (Flags.O_RDWR, Flags.O_WRONLY)
    simfile = self.state.fs.get(name)
    if simfile is None:
        ident = SimFile.make_ident(name)
        if not writing:
            # Read-only open of a nonexistent file only succeeds with
            # ALL_FILES_EXIST; an unconstrained symbolic file is invented.
            if options.ALL_FILES_EXIST not in self.state.options:
                return None
            l.warning("Trying to open unknown file %s - created a symbolic file since ALL_FILES_EXIST is set", name)
            simfile = SimFile(name, ident=ident, size=self.state.solver.BVS('filesize_%s' % ident, self.state.arch.bits, key=('file', ident, 'filesize'), eternal=True))
        else:
            # A writable open may create the file; start it out empty.
            simfile = SimFile(name, ident=ident)
        if not self.state.fs.insert(name, simfile):
            return None
    # Wrap the file in a descriptor bound to this state and register it.
    simfd = SimFileDescriptor(simfile, flags)
    simfd.set_state(self.state)
    self.fd[fd] = simfd
    return fd
|
Open a symbolic file. Basically open(2).
:param name: Path of the symbolic file, as a string or bytes.
:type name: string or bytes
:param flags: File operation flags, a bitfield of constants from open(2), as an AST
:param preferred_fd: Assign this fd if it's not already claimed.
:return: The file descriptor number allocated (maps through posix.get_fd to a SimFileDescriptor)
or None if the open fails.
``mode`` from open(2) is unsupported at present.
|
def sub_path(self):
    """The path of the partition source, excluding the bundle path parts.
    Includes the revision.
    """
    # NOTE: ``e.message`` is a Python-2-only exception attribute,
    # preserved here as in the original.
    try:
        parts = self._local_parts()
        return os.path.join(*parts)
    except TypeError as e:
        message = "Path failed for partition {} : {}".format(
            self.name, e.message)
        raise TypeError(message)
|
The path of the partition source, excluding the bundle path parts.
Includes the revision.
|
def _sortkey(self, key='uri', language='any'):
'''
Provide a single sortkey for this conceptscheme.
:param string key: Either `uri`, `label` or `sortlabel`.
:param string language: The preferred language to receive the label in
if key is `label` or `sortlabel`. This should be a valid IANA language tag.
:rtype: :class:`str`
'''
if key == 'uri':
return self.uri
else:
l = label(self.labels, language, key == 'sortlabel')
return l.label.lower() if l else ''
|
Provide a single sortkey for this conceptscheme.
:param string key: Either `uri`, `label` or `sortlabel`.
:param string language: The preferred language to receive the label in
if key is `label` or `sortlabel`. This should be a valid IANA language tag.
:rtype: :class:`str`
|
def _bundle_exists(self, path):
"""Checks if a bundle exists at the provided path
:param path: Bundle path
:return: bool
"""
for attached_bundle in self._attached_bundles:
if path == attached_bundle.path:
return True
return False
|
Checks if a bundle exists at the provided path
:param path: Bundle path
:return: bool
|
def QueryUsers(self, database_link, query, options=None):
    """Queries users in a database.
    :param str database_link:
        The link to the database.
    :param (str or dict) query:
    :param dict options:
        The request options for the request.
    :return:
        Query Iterable of Users.
    :rtype:
        query_iterable.QueryIterable
    """
    if options is None:
        options = {}
    # Resolve the users-feed path and the owning database id from the link.
    path = base.GetPathFromLink(database_link, 'users')
    database_id = base.GetResourceIdOrFullNameFromLink(database_link)
    def fetch_fn(options):
        # Invoked lazily by QueryIterable for each page; returns the raw
        # feed result together with the headers of the last response.
        return self.__QueryFeed(path,
                                'users',
                                database_id,
                                lambda r: r['Users'],
                                lambda _, b: b,
                                query,
                                options), self.last_response_headers
    return query_iterable.QueryIterable(self, query, options, fetch_fn)
|
Queries users in a database.
:param str database_link:
The link to the database.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Users.
:rtype:
query_iterable.QueryIterable
|
def from_environment_or_defaults(cls, environment=None):
    """Create a Run object taking values from the local environment where possible.
    The run ID comes from WANDB_RUN_ID or is randomly generated.
    The run mode ("dryrun", or "run") comes from WANDB_MODE or defaults to "dryrun".
    The run directory comes from WANDB_RUN_DIR or is generated from the run ID.
    The Run will have a .config attribute but its run directory won't be set by
    default.

    :param environment: mapping to read settings from; defaults to ``os.environ``.
    :return: a new Run instance built from the gathered settings.
    """
    if environment is None:
        environment = os.environ
    run_id = environment.get(env.RUN_ID)
    resume = environment.get(env.RESUME)
    storage_id = environment.get(env.RUN_STORAGE_ID)
    mode = environment.get(env.MODE)
    disabled = InternalApi().disabled()
    # When W&B is disabled locally, force dryrun mode unless the user
    # explicitly chose a mode; otherwise just warn about the mismatch.
    if not mode and disabled:
        mode = "dryrun"
    elif disabled and mode != "dryrun":
        wandb.termlog(
            "WARNING: WANDB_MODE is set to run, but W&B was disabled. Run `wandb on` to remove this message")
    elif disabled:
        wandb.termlog(
            'W&B is disabled in this directory. Run `wandb on` to enable cloud syncing.')
    group = environment.get(env.RUN_GROUP)
    job_type = environment.get(env.JOB_TYPE)
    run_dir = environment.get(env.RUN_DIR)
    sweep_id = environment.get(env.SWEEP_ID)
    program = environment.get(env.PROGRAM)
    description = environment.get(env.DESCRIPTION)
    # NOTE(review): these helpers presumably read os.environ directly and
    # therefore ignore a caller-supplied ``environment`` mapping -- confirm.
    args = env.get_args()
    wandb_dir = env.get_dir()
    tags = env.get_tags()
    config = Config.from_environment_or_defaults()
    run = cls(run_id, mode, run_dir,
              group, job_type, config,
              sweep_id, storage_id, program=program, description=description,
              args=args, wandb_dir=wandb_dir, tags=tags,
              resume=resume)
    return run
|
Create a Run object taking values from the local environment where possible.
The run ID comes from WANDB_RUN_ID or is randomly generated.
The run mode ("dryrun", or "run") comes from WANDB_MODE or defaults to "dryrun".
The run directory comes from WANDB_RUN_DIR or is generated from the run ID.
The Run will have a .config attribute but its run directory won't be set by
default.
|
def clean(tf_matrix,
          tf_matrix_gene_names,
          target_gene_name):
    """
    Drop the target gene's column from the transcription factor matrix.

    :param tf_matrix: numpy array. The full transcription factor matrix.
    :param tf_matrix_gene_names: the full list of transcription factor names,
        corresponding to the tf_matrix columns.
    :param target_gene_name: the target gene to remove from the tf_matrix and tf_names.
    :return: a tuple of (matrix, names) equal to the specified ones minus the
        target_gene_name if the target happens to be one of the transcription
        factors. If not, the matrix is returned unchanged (the names are
        re-listed either way).
    """
    if target_gene_name in tf_matrix_gene_names:
        target_column = tf_matrix_gene_names.index(target_gene_name)
        clean_matrix = np.delete(tf_matrix, target_column, 1)
    else:
        clean_matrix = tf_matrix

    clean_names = [name for name in tf_matrix_gene_names if name != target_gene_name]

    assert clean_matrix.shape[1] == len(clean_names)  # sanity check
    return clean_matrix, clean_names
|
:param tf_matrix: numpy array. The full transcription factor matrix.
:param tf_matrix_gene_names: the full list of transcription factor names, corresponding to the tf_matrix columns.
:param target_gene_name: the target gene to remove from the tf_matrix and tf_names.
:return: a tuple of (matrix, names) equal to the specified ones minus the target_gene_name if the target happens
to be one of the transcription factors. If not, the specified (tf_matrix, tf_names) is returned verbatim.
|
def runExperiment(args, model=None):
    """
    Run a single OPF experiment.
    .. note:: The caller is responsible for initializing python logging before
       calling this function (e.g., import :mod:`nupic.support`;
       :meth:`nupic.support.initLogging`)
    See also: :meth:`.initExperimentPrng`.
    :param args: (string) Experiment command-line args list. To see all options,
          run with ``--help``:
    .. code-block:: text
        Options:
          -h, --help           show this help message and exit
          -c <CHECKPOINT>      Create a model and save it under the given <CHECKPOINT>
                               name, but don't run it
          --listCheckpoints    List all available checkpoints
          --listTasks          List all task labels in description.py
          --load=<CHECKPOINT>  Load a model from the given <CHECKPOINT> and run it.
                               Run with --listCheckpoints flag for more details.
          --newSerialization   Use new capnproto serialization
          --tasks              Run the tasks with the given TASK LABELS in the order
                               they are given.  Either end of arg-list, or a
                               standalone dot ('.') arg or the next short or long
                               option name (-a or --blah) terminates the list. NOTE:
                               FAILS TO RECOGNIZE task label names with one or more
                               leading dashes. [default: run all of the tasks in
                               description.py]
          --testMode           Reduce iteration count for testing
          --noCheckpoint       Don't checkpoint the model after running each task.
    :param model: (:class:`~nupic.frameworks.opf.model.Model`) For testing, may
           pass in an existing OPF Model to use instead of creating a new one.
    :returns: (:class:`~nupic.frameworks.opf.model.Model`)
           reference to OPF Model instance that was constructed (this
           is provided to aid with debugging) or None, if none was
           created.
    """
    # Parse command-line options, then delegate to the implementation.
    opt = _parseCommandLineOptions(args)
    model = _runExperimentImpl(opt, model)
    return model
|
Run a single OPF experiment.
.. note:: The caller is responsible for initializing python logging before
calling this function (e.g., import :mod:`nupic.support`;
:meth:`nupic.support.initLogging`)
See also: :meth:`.initExperimentPrng`.
:param args: (string) Experiment command-line args list. To see all options,
run with ``--help``:
.. code-block:: text
Options:
-h, --help show this help message and exit
-c <CHECKPOINT> Create a model and save it under the given <CHECKPOINT>
name, but don't run it
--listCheckpoints List all available checkpoints
--listTasks List all task labels in description.py
--load=<CHECKPOINT> Load a model from the given <CHECKPOINT> and run it.
Run with --listCheckpoints flag for more details.
--newSerialization Use new capnproto serialization
--tasks Run the tasks with the given TASK LABELS in the order
they are given. Either end of arg-list, or a
standalone dot ('.') arg or the next short or long
option name (-a or --blah) terminates the list. NOTE:
FAILS TO RECOGNIZE task label names with one or more
leading dashes. [default: run all of the tasks in
description.py]
--testMode Reduce iteration count for testing
--noCheckpoint Don't checkpoint the model after running each task.
:param model: (:class:`~nupic.frameworks.opf.model.Model`) For testing, may
pass in an existing OPF Model to use instead of creating a new one.
:returns: (:class:`~nupic.frameworks.opf.model.Model`)
reference to OPF Model instance that was constructed (this
is provided to aid with debugging) or None, if none was
created.
|
def debug(self, msg, indent=0, **kwargs):
    """Log ``msg`` at DEBUG level through ``self.logger``, indented by
    ``indent`` via ``self._indent``; extra kwargs pass straight through."""
    indented_msg = self._indent(msg, indent)
    return self.logger.debug(indented_msg, **kwargs)
|
invoke ``self.logger.debug``
|
def shift(*args):
    """`shift()` returns the leftmost element of `argv`.
    `shift(integer)` returns the `integer` leftmost elements of `argv` as a list.
    `shift(iterable)` and `shift(iterable, integer)` operate over `iterable`.

    The shifted elements are removed from the source list in place
    (via ``pop(0)``). Raises ValueError for bad argument counts, bad
    argument types, or a non-positive count.
    """
    if len(args) > 2:
        raise ValueError("shift() takes 0, 1 or 2 arguments.")

    # Defaults: pop a single element off the script's argv.
    n = 1
    l = ayrton.runner.globals['argv']
    logger.debug2("%s(%d)", args, len(args))

    if len(args) == 1:
        # A single argument is either a count or an alternative iterable.
        value = args[0]
        logger.debug2(type(value))
        if isinstance(value, int):
            n = value
        elif isinstance(value, Iterable):
            l = value
        else:
            raise ValueError("First parameter must be Iterable or int().")
    elif len(args) == 2:
        l, n = args

    logger.debug2("%s(%d)", args, len(args))
    logger.debug("%s[%d]", l, n)

    if n == 1:
        ans = l.pop(0)
    elif n > 1:
        ans = [l.pop(0) for i in range(n)]
    else:
        # BUG FIX: the old message said ">= 0" but n == 0 was (and is)
        # rejected here; only counts of 1 or more are accepted.
        raise ValueError("Integer parameter must be >= 1.")

    return ans
|
`shift()` returns the leftmost element of `argv`.
`shift(integer)` returns the `integer` leftmost elements of `argv` as a list.
`shift(iterable)` and `shift(iterable, integer)` operate over `iterable`.
|
def decode_to_unicode(content):
    """Decode ISO-8859-1 encoded bytes to unicode, when using sf api.

    :param content: bytes (assumed ISO-8859-1), text, or a falsy value.
    :return: decoded text; the input unchanged when it is already a str,
        falsy, or cannot be decoded.
    """
    if content and not isinstance(content, str):
        try:
            # Try to decode ISO-8859-1 to unicode
            return content.decode("ISO-8859-1")
        except (UnicodeDecodeError, AttributeError):
            # BUG FIX: the original caught UnicodeEncodeError, which
            # bytes.decode() never raises -- decode failures raise
            # UnicodeDecodeError. AttributeError additionally covers
            # objects without a .decode() method.
            return content
    return content
|
decode ISO-8859-1 to unicode, when using sf api
|
def _render_mail(self, rebuild, success, auto_canceled, manual_canceled):
    """Render and return subject and body of the mail to send.

    :param rebuild: bool, True for an autorebuild
    :param success: bool, whether the build succeeded
    :param auto_canceled: bool, build was canceled automatically
    :param manual_canceled: bool, build was canceled by a user
    :return: (subject, body, log_files) tuple; log_files is None unless
             this is a failed autorebuild
    """
    subject_template = '%(endstate)s building image %(image_name)s'
    body_template = '\n'.join([
        'Image Name: %(image_name)s',
        'Repositories: %(repositories)s',
        'Status: %(endstate)s',
        'Submitted by: %(user)s',
    ])
    # Failed autorebuilds include logs as attachments.
    # Koji integration stores logs in successful Koji Builds.
    # Don't include logs in these cases.
    if self.session and not rebuild:
        body_template += '\nLogs: %(logs)s'
    endstate = None
    if auto_canceled or manual_canceled:
        endstate = 'Canceled'
    else:
        endstate = 'Succeeded' if success else 'Failed'
    url = self._get_logs_url()
    image_name, repos = self._get_image_name_and_repos()
    # Render each repository on its own indented line.
    repositories = ''
    for repo in repos:
        repositories += '\n    ' + repo
    formatting_dict = {
        'repositories': repositories,
        'image_name': image_name,
        'endstate': endstate,
        'user': '<autorebuild>' if rebuild else self.submitter,
        'logs': url
    }
    # Append source VCS details to the body when available.
    vcs = self.workflow.source.get_vcs_info()
    if vcs:
        body_template = '\n'.join([
            body_template,
            'Source url: %(vcs-url)s',
            'Source ref: %(vcs-ref)s',
        ])
        formatting_dict['vcs-url'] = vcs.vcs_url
        formatting_dict['vcs-ref'] = vcs.vcs_ref
    # Only failed autorebuilds get log files attached.
    log_files = None
    if rebuild and endstate == 'Failed':
        log_files = self._fetch_log_files()
    return (subject_template % formatting_dict, body_template % formatting_dict, log_files)
|
Render and return subject and body of the mail to send.
|
async def _discard(self, path, *, recurse=None, separator=None, cas=None):
"""Deletes the Key
"""
path = "/v1/kv/%s" % path
response = await self._api.delete(path, params={
"cas": cas,
"recurse": recurse,
"separator": separator
})
return response
|
Deletes the Key
|
def normalize_bbox(bbox, rows, cols):
    """Scale bounding-box coordinates into the unit square.

    x-coordinates are divided by the image width (``cols``) and
    y-coordinates by the image height (``rows``). Any extra fields after
    the first four (e.g. a class label) are passed through untouched.

    :raises ValueError: if ``rows`` or ``cols`` equals zero.
    """
    if rows == 0:
        raise ValueError('Argument rows cannot be zero')
    if cols == 0:
        raise ValueError('Argument cols cannot be zero')
    x_min, y_min, x_max, y_max = bbox[:4]
    scaled = [x_min / cols, y_min / rows, x_max / cols, y_max / rows]
    tail = list(bbox[4:])
    return scaled + tail
|
Normalize coordinates of a bounding box. Divide x-coordinates by image width and y-coordinates
by image height.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.