def authenticate(self, username, password):
    """Authenticate against the ObjectRocket API.

    :param str username: The username to perform basic authentication against the API with.
    :param str password: The password to perform basic authentication against the API with.
    :returns: A token used for authentication against token protected resources.
    :rtype: str
    """
    # Update the username and password bound to this instance for re-authentication needs.
    self._username = username
    self._password = password

    # Attempt to authenticate.
    resp = requests.get(
        self._url,
        auth=(username, password),
        **self._default_request_kwargs
    )

    # Attempt to extract authentication data.
    try:
        if resp.status_code == 200:
            json_data = resp.json()
            token = json_data['data']['token']
        elif resp.status_code == 401:
            raise errors.AuthFailure(resp.json().get('message', 'Authentication Failure.'))
        else:
            raise errors.AuthFailure(
                "Unknown exception while authenticating: '{}'".format(resp.text)
            )
    except errors.AuthFailure:
        raise
    except Exception as ex:
        logger.exception(ex)
        raise errors.AuthFailure('{}: {}'.format(ex.__class__.__name__, ex))

    # Update the token bound to this instance for use by other client operations layers.
    self._token = token
    logger.info('New API token received: "{}".'.format(token))
    return token
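A minimal standalone sketch of the same basic-auth-then-token pattern; the helper name and the 'data'/'token' JSON shape simply mirror the method above and are not part of any published client API:

import requests

def get_token(url, username, password):
    # HTTP basic auth, then extract the token from the JSON body,
    # following the same flow as authenticate() above.
    resp = requests.get(url, auth=(username, password))
    resp.raise_for_status()
    return resp.json()['data']['token']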
def _request_toc_element(self, index):
    """Request information about a specific item in the TOC"""
    logger.debug('Requesting index %d on port %d', index, self.port)
    pk = CRTPPacket()
    if self._useV2:
        pk.set_header(self.port, TOC_CHANNEL)
        pk.data = (CMD_TOC_ITEM_V2, index & 0x0ff, (index >> 8) & 0x0ff)
        self.cf.send_packet(pk, expected_reply=(
            CMD_TOC_ITEM_V2, index & 0x0ff, (index >> 8) & 0x0ff))
    else:
        pk.set_header(self.port, TOC_CHANNEL)
        pk.data = (CMD_TOC_ELEMENT, index)
        self.cf.send_packet(pk, expected_reply=(CMD_TOC_ELEMENT, index))
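The V2 request packs a 16-bit TOC index into two bytes, low byte first. A quick self-contained check of that round trip:

def split_index(index):
    # Same byte layout as the CMD_TOC_ITEM_V2 packet above.
    return index & 0xff, (index >> 8) & 0xff

lo, hi = split_index(0x1234)
assert (lo, hi) == (0x34, 0x12)
assert lo | (hi << 8) == 0x1234  # recombines to the original index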
def validate_template(template_body=None, template_url=None, region=None, key=None, keyid=None, profile=None):
    '''
    Validate cloudformation template

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_cfn.validate_template mystack-template
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        # Returns an object if the JSON validates and raises an exception if it does not
        return conn.validate_template(template_body, template_url)
    except BotoServerError as e:
        log.debug(e)
        msg = 'Error while trying to validate template {0}.'.format(template_body)
        log.error(msg)
        return six.text_type(e)
def setup(self, path=None):
    """
    Look for the SExtractor program ('sextractor', or 'sex').
    If a full path is provided, only this path is checked.
    Raise a SExtractorException if it fails.
    Return program and version if it succeeds.
    """
    # -- Finding the sextractor program and its version
    # first look for 'sextractor', then 'sex'
    candidates = ['sextractor', 'sex']

    if path:
        candidates = [path]

    selected = None
    for candidate in candidates:
        try:
            p = subprocess.Popen(candidate, shell=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 close_fds=True)
            (_out_err, _in) = (p.stdout, p.stdin)
            # Decode so the substring search also works on Python 3,
            # where pipes return bytes.
            versionline = _out_err.read().decode('utf-8', errors='replace')
            if versionline.find("SExtractor") != -1:
                selected = candidate
                break
        except IOError:
            continue

    if not selected:
        raise SExtractorException(
            """
            Cannot find the SExtractor program. Check your PATH,
            or provide the SExtractor program path in the constructor.
            """
        )

    _program = selected
    # print versionline

    _version_match = re.search(r"[Vv]ersion ([0-9.]+)", versionline)
    if not _version_match:
        raise SExtractorException(
            "Cannot determine SExtractor version."
        )

    _version = _version_match.group()[8:]
    if not _version:
        raise SExtractorException(
            "Cannot determine SExtractor version."
        )

    # print "Use " + self.program + " [" + self.version + "]"
    return _program, _version
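On Python 3 the probing above can be simplified with shutil.which, which only locates the executable and skips the banner check; a hedged sketch of that alternative, not the library's actual code:

import shutil

def find_sextractor(path=None):
    # Return the first candidate resolvable on PATH; 'path' overrides.
    candidates = [path] if path else ['sextractor', 'sex']
    for candidate in candidates:
        resolved = shutil.which(candidate)
        if resolved:
            return resolved
    raise RuntimeError('Cannot find the SExtractor program.')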
def parse_add_loopback():
    """
    Validate params when adding a loopback adapter
    """
    class Add(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            try:
                ipaddress.IPv4Interface("{}/{}".format(values[1], values[2]))
            except ipaddress.AddressValueError as e:
                raise argparse.ArgumentTypeError("Invalid IP address: {}".format(e))
            except ipaddress.NetmaskValueError as e:
                raise argparse.ArgumentTypeError("Invalid subnet mask: {}".format(e))
            setattr(args, self.dest, values)
    return Add
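A self-contained sketch of how such a validating Action is typically wired up; the option name and metavars are illustrative, not taken from the original CLI:

import argparse
import ipaddress

class AddLoopback(argparse.Action):
    # Validates "<name> <ip> <netmask>" like Add above.
    def __call__(self, parser, args, values, option_string=None):
        try:
            ipaddress.IPv4Interface("{}/{}".format(values[1], values[2]))
        except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as e:
            parser.error("invalid address: {}".format(e))
        setattr(args, self.dest, values)

parser = argparse.ArgumentParser()
parser.add_argument('--add', nargs=3, action=AddLoopback,
                    metavar=('NAME', 'IP', 'NETMASK'))
args = parser.parse_args(['--add', 'lo1', '10.0.0.1', '255.255.255.0'])

parser.error is used here rather than ArgumentTypeError because argparse only converts ArgumentTypeError into a clean usage message for type= callables; exceptions raised inside an Action simply propagate.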
def Stop(self):
    """Stops the emulator instance."""
    if not self.__running:
        return
    logging.info('shutting down the emulator running at %s', self._host)
    headers = {'Content-length': '0'}
    response, _ = self._http.request('%s/shutdown' % self._host,
                                     method='POST', headers=headers)
    if response.status != 200:
        logging.warning('failed to shut down emulator; response: %s', response)
    self.__running = False
    # Delete temp files.
    shutil.rmtree(self._tmp_dir)
def IsWalletTransaction(self, tx):
    """
    Verifies if a transaction belongs to the wallet.

    Args:
        tx (neo.Core.TX.Transaction.Transaction): the transaction to verify.

    Returns:
        bool: True, if the transaction belongs to the wallet. False, if not.
    """
    for key, contract in self._contracts.items():
        for output in tx.outputs:
            if output.ScriptHash.ToBytes() == contract.ScriptHash.ToBytes():
                return True
        for script in tx.scripts:
            if script.VerificationScript:
                if bytes(contract.Script) == script.VerificationScript:
                    return True
    for watch_script_hash in self._watch_only:
        for output in tx.outputs:
            if output.ScriptHash == watch_script_hash:
                return True
        for script in tx.scripts:
            if Crypto.ToScriptHash(script.VerificationScript, unhex=False) == watch_script_hash:
                return True
    return False
def minimum(left, right):
    """Returns element-wise minimum of the input elements.

    Both inputs can be Symbol or scalar number. Broadcasting is not supported.

    Parameters
    ----------
    left : Symbol or scalar
        First symbol to be compared.
    right : Symbol or scalar
        Second symbol to be compared.

    Returns
    -------
    Symbol or scalar
        The element-wise minimum of the input symbols.

    Examples
    --------
    >>> mx.sym.minimum(2, 3.5)
    2
    >>> x = mx.sym.Variable('x')
    >>> y = mx.sym.Variable('y')
    >>> z = mx.sym.minimum(x, 4)
    >>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()
    array([ 3., 4., 2., 4.], dtype=float32)
    >>> z = mx.sym.minimum(x, y)
    >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
    array([ 3., 2.], dtype=float32)
    """
    if isinstance(left, Symbol) and isinstance(right, Symbol):
        return _internal._Minimum(left, right)
    if isinstance(left, Symbol) and isinstance(right, Number):
        return _internal._MinimumScalar(left, scalar=right)
    if isinstance(left, Number) and isinstance(right, Symbol):
        return _internal._MinimumScalar(right, scalar=left)
    if isinstance(left, Number) and isinstance(right, Number):
        return left if left < right else right
    else:
        raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
def xpnsl(h1, h2, use_threads=True):
    """Cross-population version of the NSL statistic.

    Parameters
    ----------
    h1 : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array for the first population.
    h2 : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array for the second population.
    use_threads : bool, optional
        If True use multiple threads to compute.

    Returns
    -------
    score : ndarray, float, shape (n_variants,)
        Unstandardized XPNSL scores.
    """
    # check inputs
    h1 = asarray_ndim(h1, 2)
    check_integer_dtype(h1)
    h2 = asarray_ndim(h2, 2)
    check_integer_dtype(h2)
    check_dim0_aligned(h1, h2)
    h1 = memoryview_safe(h1)
    h2 = memoryview_safe(h2)

    if use_threads and multiprocessing.cpu_count() > 1:
        # use multiple threads

        # setup threadpool
        pool = ThreadPool(min(4, multiprocessing.cpu_count()))

        # scan forward
        res1_fwd = pool.apply_async(nsl_scan, args=(h1,))
        res2_fwd = pool.apply_async(nsl_scan, args=(h2,))

        # scan backward
        res1_rev = pool.apply_async(nsl_scan, args=(h1[::-1],))
        res2_rev = pool.apply_async(nsl_scan, args=(h2[::-1],))

        # wait for all scans to finish
        pool.close()
        pool.join()

        # obtain results
        nsl1_fwd = res1_fwd.get()
        nsl2_fwd = res2_fwd.get()
        nsl1_rev = res1_rev.get()
        nsl2_rev = res2_rev.get()

        # cleanup
        pool.terminate()
    else:
        # compute without threads

        # scan forward
        nsl1_fwd = nsl_scan(h1)
        nsl2_fwd = nsl_scan(h2)

        # scan backward
        nsl1_rev = nsl_scan(h1[::-1])
        nsl2_rev = nsl_scan(h2[::-1])

    # handle reverse scans
    nsl1_rev = nsl1_rev[::-1]
    nsl2_rev = nsl2_rev[::-1]

    # compute unstandardized score
    nsl1 = nsl1_fwd + nsl1_rev
    nsl2 = nsl2_fwd + nsl2_rev
    score = np.log(nsl1 / nsl2)

    return score
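The forward/backward combination is the heart of the function: scan once in each direction, flip the backward result back into genome order, and sum. A toy illustration with a stand-in scan (the real nsl_scan is far more involved):

import numpy as np

def toy_scan(h):
    # Stand-in for nsl_scan: any statistic accumulated along axis 0.
    return np.cumsum(h.sum(axis=1))

h = np.array([[0, 1], [1, 1], [0, 0]])
fwd = toy_scan(h)
rev = toy_scan(h[::-1])[::-1]  # scan the flipped array, then flip back
combined = fwd + rev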
def parse_content(self, content):
    """
    Sample Input::

        TimeoutStartUSec=1min 30s
        LimitNOFILE=65536
        LimitMEMLOCK=
        LimitLOCKS=18446744073709551615

    Sample Output::

        {"LimitNOFILE": "65536",
         "TimeoutStartUSec": "1min 30s",
         "LimitLOCKS": "18446744073709551615"}

    In the command's output, empty properties are suppressed by default.
    We also suppress empty properties in the returned data.
    """
    data = split_kv_pairs(content, use_partition=False)
    # Remove empty keys.
    self.data = dict((k, v) for k, v in data.items() if not v == '')
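split_kv_pairs comes from the surrounding library and is not shown here; a rough standalone equivalent of the whole parse-and-filter step (an assumption about its behavior, not its implementation):

def parse_kv_lines(lines):
    # Split each "KEY=VALUE" line on the first '=' and drop empty values,
    # mirroring the filtering in parse_content above.
    data = {}
    for line in lines:
        key, sep, value = line.partition('=')
        if sep and value:
            data[key.strip()] = value.strip()
    return data

parse_kv_lines(["LimitNOFILE=65536", "LimitMEMLOCK="])
# -> {'LimitNOFILE': '65536'}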
def unsubscribe(self, code_list, subtype_list):
    """
    Unsubscribe.

    :param code_list: list of stock codes to unsubscribe from
    :param subtype_list: subscription types to cancel; see SubType
    :return: (ret, err_message)
             ret == RET_OK: err_message is None
             ret != RET_OK: err_message is an error description string
    """
    ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
    if ret != RET_OK:
        return ret, msg

    query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
                                                     SubscriptionQuery.unpack_unsubscribe_rsp)
    kargs = {
        'code_list': code_list,
        'subtype_list': subtype_list,
        "conn_id": self.get_sync_conn_id()
    }

    for subtype in subtype_list:
        if subtype not in self._ctx_subscribe:
            continue
        code_set = self._ctx_subscribe[subtype]
        for code in code_list:
            if code not in code_set:
                continue
            code_set.remove(code)

    ret_code, msg, _ = query_processor(**kargs)
    if ret_code != RET_OK:
        return RET_ERROR, msg

    ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id())
    if ret_code != RET_OK:
        return RET_ERROR, msg

    ret_code, msg = self._send_async_req(unpush_req_str)
    if ret_code != RET_OK:
        return RET_ERROR, msg

    return RET_OK, None
def _connect(self):
    """Connect to our domain"""
    if not self._db:
        import boto
        sdb = boto.connect_sdb()
        if not self.domain_name:
            self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default"))
        try:
            self._db = sdb.get_domain(self.domain_name)
        except SDBResponseError as e:
            if e.status == 400:
                self._db = sdb.create_domain(self.domain_name)
            else:
                raise
    return self._db
def object(self, infotype, key):
    "Return the encoding, idletime, or refcount about the key"
    return self.execute_command('OBJECT', infotype, key, infotype=infotype)
async def _read_packet(self, packet_type=MysqlPacket):
    """Read an entire "mysql packet" in its entirety from the network
    and return a MysqlPacket type that represents the results.
    """
    buff = b''
    while True:
        try:
            packet_header = await self._read_bytes(4)
        except asyncio.CancelledError:
            self._close_on_cancel()
            raise

        btrl, btrh, packet_number = struct.unpack(
            '<HBB', packet_header)
        bytes_to_read = btrl + (btrh << 16)

        # Outbound and inbound packets are numbered sequentially, so
        # we increment in both write_packet and read_packet. The count
        # is reset at new COMMAND PHASE.
        if packet_number != self._next_seq_id:
            raise InternalError(
                "Packet sequence number wrong - got %d expected %d" %
                (packet_number, self._next_seq_id))
        self._next_seq_id = (self._next_seq_id + 1) % 256

        try:
            recv_data = await self._read_bytes(bytes_to_read)
        except asyncio.CancelledError:
            self._close_on_cancel()
            raise
        buff += recv_data

        # https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
        if bytes_to_read == 0xffffff:
            continue
        if bytes_to_read < MAX_PACKET_LEN:
            break

    packet = packet_type(buff, self._encoding)
    packet.check_error()
    return packet
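The 4-byte header layout ('<HBB': a 24-bit little-endian length split 16+8, then a sequence number) can be verified in isolation:

import struct

header = bytes([0x2c, 0x00, 0x00, 0x01])  # length 0x00002c = 44, sequence 1
btrl, btrh, seq = struct.unpack('<HBB', header)
length = btrl + (btrh << 16)
assert (length, seq) == (44, 1)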
def check_model(self):
    """
    Check the model for various errors. This method checks for the following
    errors.

    * Checks if factors are defined for all the cliques or not.
    * Check for running intersection property is not done explicitly over
      here as it is done in the add_edges method.
    * Checks if cardinality information for all the variables is available or not.
      If not, it raises an error.
    * Checks if the cardinality of a random variable remains the same across all the
      factors.

    Returns
    -------
    check: boolean
        True if all the checks are passed
    """
    for clique in self.nodes():
        factors = filter(lambda x: set(x.scope()) == set(clique), self.factors)
        if not any(factors):
            raise ValueError('Factors for all the cliques or clusters not defined.')

    cardinalities = self.get_cardinality()
    if len(set((x for clique in self.nodes() for x in clique))) != len(cardinalities):
        raise ValueError('Factors for all the variables not defined.')

    for factor in self.factors:
        for variable, cardinality in zip(factor.scope(), factor.cardinality):
            if cardinalities[variable] != cardinality:
                raise ValueError(
                    'Cardinality of variable {var} not matching among factors'.format(var=variable))
    return True
def vatu0(self, E, Lz, u0, R, retv2=False):
    """
    NAME:
       vatu0
    PURPOSE:
       calculate the velocity at u0
    INPUT:
       E - energy
       Lz - angular momentum
       u0 - u0
       R - radius corresponding to u0, pi/2
       retv2= (False), if True return v^2
    OUTPUT:
       velocity
    HISTORY:
       2012-11-29 - Written - Bovy (IAS)
    """
    v2 = (2. * (E - actionAngleStaeckel.potentialStaeckel(u0, numpy.pi / 2.,
                                                          self._pot,
                                                          self._delta))
          - Lz**2. / R**2.)
    if retv2:
        return v2
    v2[(v2 < 0.) * (v2 > -10.**-7.)] = 0.
    return numpy.sqrt(v2)
def _get_connection_from_url(self, url, timeout, **kwargs):
    """Returns a connection object given a string url"""
    url = self._decode_url(url, "")
    if url.scheme == 'http' or url.scheme == 'https':
        return HttpConnection(url.geturl(), timeout=timeout, **kwargs)
    else:
        if sys.version_info[0] > 2:
            raise ValueError("Thrift transport is not available "
                             "for Python 3")
        try:
            from thrift_connection import ThriftConnection
        except ImportError:
            raise ImportError("The 'thrift' python package "
                              "does not seem to be installed.")
        return ThriftConnection(url.hostname, url.port,
                                timeout=timeout, **kwargs)
def pause(self, *partitions):
    """Suspend fetching from the requested partitions.

    Future calls to :meth:`~kafka.KafkaConsumer.poll` will not return any
    records from these partitions until they have been resumed using
    :meth:`~kafka.KafkaConsumer.resume`.

    Note: This method does not affect partition subscription. In particular,
    it does not cause a group rebalance when automatic assignment is used.

    Arguments:
        *partitions (TopicPartition): Partitions to pause.
    """
    if not all([isinstance(p, TopicPartition) for p in partitions]):
        raise TypeError('partitions must be TopicPartition namedtuples')
    for partition in partitions:
        log.debug("Pausing partition %s", partition)
        self._subscription.pause(partition)
def get_heading_encoding(response):
    '''Return the document encoding from a HTTP header.

    Args:
        response (Response): An instance of :class:`.http.Response`.

    Returns:
        ``str``, ``None``: The codec name.
    '''
    encoding = wpull.protocol.http.util.parse_charset(
        response.fields.get('content-type', ''))

    if encoding:
        return wpull.string.normalize_codec_name(encoding)
    else:
        return None
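wpull's parse_charset helper is not shown here; a rough standalone equivalent for pulling the charset parameter out of a Content-Type value (a simplification, not the library's actual logic):

import re

def parse_charset(content_type):
    # Extract charset from e.g. "text/html; charset=UTF-8".
    match = re.search(r'charset\s*=\s*"?([\w.:-]+)"?', content_type, re.IGNORECASE)
    return match.group(1).lower() if match else None

assert parse_charset('text/html; charset=UTF-8') == 'utf-8'
assert parse_charset('application/json') is None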
def save(self):
    """
    Save any outstanding items. This returns a Deferred which will
    errback if Tor was unhappy with anything, or callback with
    this TorConfig object on success.
    """
    if not self.needs_save():
        return defer.succeed(self)

    args = []
    directories = []
    for (key, value) in self.unsaved.items():
        if key == 'HiddenServices':
            self.config['HiddenServices'] = value
            # using a list here because at least one unit-test
            # cares about order -- and conceivably order *could*
            # matter here, to Tor...
            services = list()
            # authenticated services get flattened into the HiddenServices list...
            for hs in value:
                if IOnionClient.providedBy(hs):
                    parent = IOnionClient(hs).parent
                    if parent not in services:
                        services.append(parent)
                elif isinstance(hs, (EphemeralOnionService, EphemeralHiddenService)):
                    raise ValueError(
                        "Only filesystem based Onion services may be added"
                        " via TorConfig.hiddenservices; ephemeral services"
                        " must be created with 'create_onion_service'."
                    )
                else:
                    if hs not in services:
                        services.append(hs)

            for hs in services:
                for (k, v) in hs.config_attributes():
                    if k == 'HiddenServiceDir':
                        if v not in directories:
                            directories.append(v)
                            args.append(k)
                            args.append(v)
                        else:
                            raise RuntimeError("Trying to add hidden service with same HiddenServiceDir: %s" % v)
                    else:
                        args.append(k)
                        args.append(v)
            continue

        if isinstance(value, list):
            for x in value:
                # FIXME XXX
                if x is not DEFAULT_VALUE:
                    args.append(key)
                    args.append(str(x))
        else:
            args.append(key)
            args.append(value)

        # FIXME in future we should wait for CONF_CHANGED and
        # update then, right?
        real_name = self._find_real_name(key)
        if not isinstance(value, list) and real_name in self.parsers:
            value = self.parsers[real_name].parse(value)
        self.config[real_name] = value

    # FIXME might want to re-think this, but currently there's no
    # way to put things into a config and get them out again
    # nicely...unless you just don't assign a protocol
    if self.protocol:
        d = self.protocol.set_conf(*args)
        d.addCallback(self._save_completed)
        return d
    else:
        self._save_completed()
        return defer.succeed(self)
def get_bulk_asn_whois(addresses=None, retry_count=3, timeout=120):
    """
    The function for retrieving ASN information for multiple IP addresses from
    Cymru via port 43/tcp (WHOIS).

    Args:
        addresses (:obj:`list` of :obj:`str`): IP addresses to lookup.
        retry_count (:obj:`int`): The number of times to retry in case socket
            errors, timeouts, connection resets, etc. are encountered.
            Defaults to 3.
        timeout (:obj:`int`): The default timeout for socket connections in
            seconds. Defaults to 120.

    Returns:
        str: The raw ASN bulk data, new line separated.

    Raises:
        ValueError: addresses argument must be a list of IPv4/v6 address
            strings.
        ASNLookupError: The ASN bulk lookup failed.
    """
    if not isinstance(addresses, list):
        raise ValueError('addresses argument must be a list of IPv4/v6 '
                         'address strings.')

    try:
        # Create the connection for the Cymru whois query.
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.settimeout(timeout)
        log.debug('ASN bulk query initiated.')
        conn.connect((CYMRU_WHOIS, 43))

        # Query the Cymru whois server, and store the results.
        conn.sendall((
            ' -r -a -c -p -f begin\n{0}\nend'.format(
                '\n'.join(addresses))
        ).encode())

        data = ''
        while True:
            d = conn.recv(4096).decode()
            data += d
            if not d:
                break
        conn.close()

        return str(data)
    except (socket.timeout, socket.error) as e:  # pragma: no cover
        log.debug('ASN bulk query socket error: {0}'.format(e))
        if retry_count > 0:
            log.debug('ASN bulk query retrying (count: {0})'.format(
                str(retry_count)))
            return get_bulk_asn_whois(addresses, retry_count - 1, timeout)
        else:
            raise ASNLookupError('ASN bulk lookup failed.')
    except:  # pragma: no cover
        raise ASNLookupError('ASN bulk lookup failed.')
def forward(self, is_train=False):
    """Perform a forward pass on each executor."""
    for texec in self.train_execs:
        texec.forward(is_train=is_train)
def _process_custom_unitary(self, node):
    """Process a custom unitary node."""
    name = node.name
    if node.arguments is not None:
        args = self._process_node(node.arguments)
    else:
        args = []
    bits = [self._process_bit_id(node_element)
            for node_element in node.bitlist.children]
    if name in self.gates:
        gargs = self.gates[name]["args"]
        gbits = self.gates[name]["bits"]
        # Loop over register arguments, if any.
        maxidx = max(map(len, bits))
        for idx in range(maxidx):
            self.arg_stack.append({gargs[j]: args[j]
                                   for j in range(len(gargs))})
            # Only index into register arguments.
            element = [idx * x for x in
                       [len(bits[j]) > 1 for j in range(len(bits))]]
            self.bit_stack.append({gbits[j]: bits[j][element[j]]
                                   for j in range(len(gbits))})
            self._create_dag_op(name,
                                [self.arg_stack[-1][s].sym() for s in gargs],
                                [self.bit_stack[-1][s] for s in gbits])
            self.arg_stack.pop()
            self.bit_stack.pop()
    else:
        raise QiskitError("internal error undefined gate:",
                          "line=%s" % node.line, "file=%s" % node.file)
def on(self, analyte=None, filt=None):
    """
    Turn on specified filter(s) for specified analyte(s).

    Parameters
    ----------
    analyte : optional, str or array_like
        Name or list of names of analytes.
        Defaults to all analytes.
    filt : optional, int, str or array_like
        Name/number or iterable names/numbers of filters.

    Returns
    -------
    None
    """
    if isinstance(analyte, str):
        analyte = [analyte]

    if isinstance(filt, (int, float)):
        filt = [filt]
    elif isinstance(filt, str):
        filt = self.fuzzmatch(filt, multi=True)

    if analyte is None:
        analyte = self.analytes
    if filt is None:
        filt = list(self.index.values())

    for a in analyte:
        for f in filt:
            if isinstance(f, (int, float)):
                f = self.index[int(f)]
            try:
                self.switches[a][f] = True
            except KeyError:
                f = self.fuzzmatch(f, multi=False)
                self.switches[a][f] = True
            # for k in self.switches[a].keys():
            #     if f in k:
            #         self.switches[a][k] = True
    return
def alpha_beta_aligned(returns,
                       factor_returns,
                       risk_free=0.0,
                       period=DAILY,
                       annualization=None,
                       out=None):
    """Calculates annualized alpha and beta.

    If they are pd.Series, expects returns and factor_returns have already
    been aligned on their labels. If np.ndarray, these arguments should have
    the same shape.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    risk_free : int, float, optional
        Constant risk-free return throughout the period. For example, the
        interest rate on a three month US treasury bill.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::

            'monthly': 12
            'weekly': 52
            'daily': 252

    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.

    Returns
    -------
    alpha : float
    beta : float
    """
    if out is None:
        out = np.empty(returns.shape[1:] + (2,), dtype='float64')

    b = beta_aligned(returns, factor_returns, risk_free, out=out[..., 1])
    alpha_aligned(
        returns,
        factor_returns,
        risk_free,
        period,
        annualization,
        out=out[..., 0],
        _beta=b,
    )
    return out
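The output-buffer pattern above (beta written into out[..., 1], alpha into out[..., 0] of one preallocated array) in miniature, with stand-in statistics:

import numpy as np

def mean_and_std(x, out=None):
    # Pack two statistics into the last axis of one caller-provided
    # (or freshly allocated) buffer, as alpha_beta_aligned does.
    if out is None:
        out = np.empty(x.shape[1:] + (2,), dtype='float64')
    out[..., 0] = x.mean(axis=0)
    out[..., 1] = x.std(axis=0)
    return out

r = np.random.rand(10, 3)
buf = np.empty((3, 2))
assert mean_and_std(r, out=buf) is buf  # no new allocation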
def _insert_compressed(
        collection_name, docs, check_keys, continue_on_error, opts, ctx):
    """Internal compressed unacknowledged insert message helper."""
    op_insert, max_bson_size = _insert(
        collection_name, docs, check_keys, continue_on_error, opts)
    rid, msg = _compress(2002, op_insert, ctx)
    return rid, msg, max_bson_size
def run_preassembly(stmts_in, **kwargs):
    """Run preassembly on a list of statements.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to preassemble.
    return_toplevel : Optional[bool]
        If True, only the top-level statements are returned. If False,
        all statements are returned irrespective of level of specificity.
        Default: True
    poolsize : Optional[int]
        The number of worker processes to use to parallelize the
        comparisons performed by the function. If None (default), no
        parallelization is performed. NOTE: Parallelization is only
        available on Python 3.4 and above.
    size_cutoff : Optional[int]
        Groups with size_cutoff or more statements are sent to worker
        processes, while smaller groups are compared in the parent process.
        Default value is 100. Not relevant when parallelization is not
        used.
    belief_scorer : Optional[indra.belief.BeliefScorer]
        Instance of BeliefScorer class to use in calculating Statement
        probabilities. If None is provided (default), then the default
        scorer is used.
    hierarchies : Optional[dict]
        Dict of hierarchy managers to use for preassembly.
    flatten_evidence : Optional[bool]
        If True, evidences are collected and flattened via supports/supported_by
        links. Default: False
    flatten_evidence_collect_from : Optional[str]
        String indicating whether to collect and flatten evidence from the
        `supports` attribute of each statement or the `supported_by` attribute.
        If not set, defaults to 'supported_by'.
        Only relevant when flatten_evidence is True.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.
    save_unique : Optional[str]
        The name of a pickle file to save the unique statements into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of preassembled top-level statements.
    """
    dump_pkl_unique = kwargs.get('save_unique')
    belief_scorer = kwargs.get('belief_scorer')
    use_hierarchies = kwargs['hierarchies'] if 'hierarchies' in kwargs else \
        hierarchies
    be = BeliefEngine(scorer=belief_scorer)
    # Use the hierarchies resolved above (the 'hierarchies' kwarg if given,
    # otherwise the module-level default) rather than ignoring the kwarg.
    pa = Preassembler(use_hierarchies, stmts_in)

    run_preassembly_duplicate(pa, be, save=dump_pkl_unique)

    dump_pkl = kwargs.get('save')
    return_toplevel = kwargs.get('return_toplevel', True)
    poolsize = kwargs.get('poolsize', None)
    size_cutoff = kwargs.get('size_cutoff', 100)
    options = {'save': dump_pkl, 'return_toplevel': return_toplevel,
               'poolsize': poolsize, 'size_cutoff': size_cutoff,
               'flatten_evidence': kwargs.get('flatten_evidence', False),
               'flatten_evidence_collect_from':
                   kwargs.get('flatten_evidence_collect_from', 'supported_by')
               }
    stmts_out = run_preassembly_related(pa, be, **options)
    return stmts_out
def add_resource(self, value, name: str = 'default', context_attr: str = None,
                 types: Union[type, Sequence[type]] = ()) -> None:
    """
    Add a resource to this context.

    This will cause a ``resource_added`` event to be dispatched.

    :param value: the actual resource value
    :param name: name of this resource (unique among all its registered types within a single
        context)
    :param context_attr: name of the context attribute this resource will be accessible as
    :param types: type(s) to register the resource as (omit to use the type of ``value``)
    :raises asphalt.core.context.ResourceConflict: if the resource conflicts with an existing
        one in any way
    """
    assert check_argument_types()
    self._check_closed()
    if isinstance(types, type):
        types = (types,)
    elif not types:
        types = (type(value),)

    if value is None:
        raise ValueError('"value" must not be None')
    if not resource_name_re.fullmatch(name):
        raise ValueError('"name" must be a nonempty string consisting only of alphanumeric '
                         'characters and underscores')
    if context_attr and getattr_static(self, context_attr, None) is not None:
        raise ResourceConflict('this context already has an attribute {!r}'.format(
            context_attr))
    for resource_type in types:
        if (resource_type, name) in self._resources:
            raise ResourceConflict(
                'this context already contains a resource of type {} using the name {!r}'.
                format(qualified_name(resource_type), name))

    resource = ResourceContainer(value, tuple(types), name, context_attr, False)
    for type_ in resource.types:
        self._resources[(type_, name)] = resource

    if context_attr:
        setattr(self, context_attr, value)

    # Notify listeners that a new resource has been made available
    self.resource_added.dispatch(types, name, False)
def getopt(self, name, argv, opts):
    """getopt(name, argv, opts)

    Parse X command line options, inserting the recognised options
    into the resource database.

    NAME is the application name, and will be prepended to all
    specifiers. ARGV is the list of command line arguments,
    typically sys.argv[1:].

    OPTS is a mapping of options to resource specifiers. The key is
    the option flag (with leading -), and the value is an instance of
    some Option subclass:

    NoArg(specifier, value): set resource to value.
    IsArg(specifier): set resource to option itself.
    SepArg(specifier): value is next argument.
    ResArg: resource and value in next argument.
    SkipArg: ignore this option and next argument.
    SkipLine: ignore rest of arguments.
    SkipNArgs(count): ignore this option and count arguments.

    The remaining, non-option, arguments are returned.
    rdb.OptionError is raised if there is an error in the argument list.
    """
    while argv and argv[0] and argv[0][0] == '-':
        try:
            argv = opts[argv[0]].parse(name, self, argv)
        except KeyError:
            raise OptionError('unknown option: %s' % argv[0])
        except IndexError:
            raise OptionError('missing argument to option: %s' % argv[0])
    return argv
def can_use_autofor(self, node):
    """
    Check if the given for-Node can use the autoFor syntax.

    To use auto_for:
        - the iterator should have local scope
        - yield should not be used
        - an OpenMP pragma should not be used

    TODO: Yield should block only if it is used in the for loop, not in the
    whole function.
    """
    auto_for = (isinstance(node.target, ast.Name) and
                node.target.id in self.scope[node] and
                node.target.id not in self.openmp_deps)
    auto_for &= not metadata.get(node, OMPDirective)
    return auto_for
def simxGetFloatingParameter(clientID, paramIdentifier, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    paramValue = ct.c_float()
    return c_GetFloatingParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode), paramValue.value
def add_connection(self, host):
    """
    Create a new :class:`~elasticsearch.Connection` instance and add it to the pool.

    :arg host: kwargs that will be used to create the instance
    """
    self.hosts.append(host)
    self.set_connections(self.hosts)
def nr_of_antenna(nbl, auto_correlations=False):
    """
    Compute the number of antennas for the
    given number of baselines. Can specify whether
    auto-correlations should be taken into
    account.
    """
    t = 1 if auto_correlations is False else -1
    return int(t + math.sqrt(1 + 8 * nbl)) // 2
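The closed form inverts the baseline count: without auto-correlations nbl = n(n-1)/2 gives n = (1 + sqrt(1 + 8*nbl)) / 2, and with them nbl = n(n+1)/2 gives n = (-1 + sqrt(1 + 8*nbl)) / 2, which is exactly the t = +/-1 switch in the code. A quick self-check:

import math

def nr_of_antenna(nbl, auto_correlations=False):
    t = 1 if auto_correlations is False else -1
    return int(t + math.sqrt(1 + 8 * nbl)) // 2

for n in range(2, 100):
    assert nr_of_antenna(n * (n - 1) // 2) == n
    assert nr_of_antenna(n * (n + 1) // 2, auto_correlations=True) == n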
def f(self, m):
    """
    The recursively composed version of filter function f.

    By default, returns the logical **conjunction** of the operator and its
    single child operator.
    """
    if len(self.children) == 0:
        return self._f(m)
    elif len(self.children) == 1:
        return self._f(m) and self.children[0].f(m)
    else:
        raise Exception(
            f"{self.__name__} does not support more than one child Matcher"
        )
def forward_events_to(self, sink, include_source=False):
    """This forwards signals to sink"""
    assert isinstance(sink, Eventful), f'{sink.__class__.__name__} is not Eventful'
    self._forwards[sink] = include_source
def download_csv(data, filename):
    """
    Download an H2O data set to a CSV file on the local disk.

    Warning: Files located on the H2O server may be very large! Make sure you have enough
    hard drive space to accommodate the entire file.

    :param data: an H2OFrame object to be downloaded.
    :param filename: name for the CSV file where the data should be saved to.
    """
    assert_is_type(data, H2OFrame)
    assert_is_type(filename, str)
    url = h2oconn.make_url("DownloadDataset", 3) + "?frame_id={}&hex_string=false".format(data.frame_id)
    with open(filename, "wb") as f:
        f.write(urlopen()(url).read())
def md5(text):
    """Returns the md5 hash of a string."""
    h = hashlib.md5()
    h.update(_unicode(text).encode("utf-8"))
    return h.hexdigest()
def delete_chat_photo(chat_id, **kwargs):
    """
    Use this method to delete a chat photo. Photos can't be changed for private chats.
    The bot must be an administrator in the chat for this to work and must have the
    appropriate admin rights.

    :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
    :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`

    :return: Returns True on success.
    :rtype: bool
    """
    # required args
    params = dict(chat_id=chat_id)

    return TelegramBotRPCRequest('deleteChatPhoto', params=params, on_result=lambda result: result, **kwargs)
def crsConvert(crsIn, crsOut):
    """
    convert between different types of spatial references

    Parameters
    ----------
    crsIn: int, str or :osgeo:class:`osr.SpatialReference`
        the input CRS
    crsOut: {'wkt', 'proj4', 'epsg', 'osr', 'opengis' or 'prettyWkt'}
        the output CRS type

    Returns
    -------
    int, str or :osgeo:class:`osr.SpatialReference`
        the output CRS

    Examples
    --------
    convert an integer EPSG code to PROJ4:

    >>> crsConvert(4326, 'proj4')
    '+proj=longlat +datum=WGS84 +no_defs '

    convert a PROJ4 string to an opengis URL:

    >>> crsConvert('+proj=longlat +datum=WGS84 +no_defs ', 'opengis')
    'http://www.opengis.net/def/crs/EPSG/0/4326'

    convert the opengis URL back to EPSG:

    >>> crsConvert('http://www.opengis.net/def/crs/EPSG/0/4326', 'epsg')
    4326

    convert an EPSG compound CRS (WGS84 horizontal + EGM96 vertical):

    >>> crsConvert('EPSG:4326+5773', 'proj4')
    '+proj=longlat +datum=WGS84 +geoidgrids=egm96_15.gtx +vunits=m +no_defs '
    """
    if isinstance(crsIn, osr.SpatialReference):
        srs = crsIn.Clone()
    else:
        srs = osr.SpatialReference()
        if isinstance(crsIn, int):
            crsIn = 'EPSG:{}'.format(crsIn)
        if isinstance(crsIn, str):
            try:
                srs.SetFromUserInput(crsIn)
            except RuntimeError:
                raise TypeError('crsIn not recognized; must be of type WKT, PROJ4 or EPSG')
        else:
            raise TypeError('crsIn must be of type int, str or osr.SpatialReference')
    if crsOut == 'wkt':
        return srs.ExportToWkt()
    elif crsOut == 'prettyWkt':
        return srs.ExportToPrettyWkt()
    elif crsOut == 'proj4':
        return srs.ExportToProj4()
    elif crsOut == 'epsg':
        srs.AutoIdentifyEPSG()
        return int(srs.GetAuthorityCode(None))
    elif crsOut == 'opengis':
        srs.AutoIdentifyEPSG()
        return 'http://www.opengis.net/def/crs/EPSG/0/{}'.format(srs.GetAuthorityCode(None))
    elif crsOut == 'osr':
        return srs
    else:
        raise ValueError('crsOut not recognized; must be one of wkt, prettyWkt, proj4, epsg, opengis or osr')
def acquire(self, tag, blocking=True):
    """Acquire the semaphore

    :param tag: A tag identifying what is acquiring the semaphore. Note
        that this is not really needed to directly use this class but is
        needed for API compatibility with the SlidingWindowSemaphore
        implementation.
    :param blocking: If True, block until it can be acquired. If False,
        do not block and raise an exception if it cannot be acquired.
    :returns: A token (can be None) to use when releasing the semaphore
    """
    logger.debug("Acquiring %s", tag)
    if not self._semaphore.acquire(blocking):
        raise NoResourcesAvailable("Cannot acquire tag '%s'" % tag)
def with_user_roles(roles):
    """
    with_user_roles(roles)

    Allows checking whether a user has access to a view by adding the
    decorator with_user_roles([...]). Requires Flask-Login.

    In your model, you must have a property 'role', which will be invoked to
    be compared to the roles provided.

    If current_user doesn't have a role, it will throw a 403.
    If the current_user is not logged in, it will throw a 401.

    Usage::

        @app.route('/user')
        @login_required
        @with_user_roles(['admin', 'user'])
        def user_page(self):
            return "You've got permission to access this page."
    """
    def wrapper(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            if current_user.is_authenticated():
                if not hasattr(current_user, "role"):
                    raise AttributeError("<'role'> doesn't exist in login 'current_user'")
                if current_user.role not in roles:
                    return abort(403)
            else:
                return abort(401)
            return f(*args, **kwargs)
        return wrapped
    return wrapper
def read(fname, **kw):
    '''
    Reads an lsp output file and returns a raw dump of data,
    sectioned into quantities either as a dictionary or a typed numpy array.

    Parameters:
    -----------
    fname -- filename of thing to read

    Keyword Arguments:
    ------------------
    vprint -- Verbose printer. Used in scripts.
    override -- (type, start) => A tuple of a dump type and a place to start
                in the passed file, useful when attempting to read semicorrupted
                files.
    gzip -- Read as a gzip file.

    flds/sclr Specific Arguments:
    -----------------------------
    var -- list of quantities to be read. For fields, this can consist
           of strings that include vector components, e.g., 'Ex'. If
           None (default), read all quantities.
    keep_edges -- If set to truthy, then don't remove the edges from domains before
                  concatenation and don't reshape the flds data.
    sort -- If not None, sort using these indices, useful for avoiding
            resorting. If True and not an ndarray, just sort.
    first_sort -- If truthy, sort, and return the sort data for future flds
                  that should have the same shape.
    keep_xs -- Keep the xs's, that is, the grid information. Usually redundant
               with x, y, z returned.
    return_array -- If set to truthy, then try to return a numpy array with a dtype.
                    Requires of course that the quantities have the same shape.
    '''
    if test(kw, 'gzip') and kw['gzip'] == 'guess':
        kw['gzip'] = re.search(r'\.gz$', fname) is not None;
    openf = gzip.open if test(kw, 'gzip') else open;
    with openf(fname, 'rb') as file:
        if test(kw, 'override'):
            dump, start = kw['override'];
            file.seek(start);
            header = {'dump_type': dump};
            if not test(kw, 'var') and 2 <= header['dump_type'] <= 3:
                raise ValueError(
                    "If you want to force to read as a scalar, you need to supply the quantities"
                );
        else:
            header = get_header(file);
        vprint = kw['vprint'] if test(kw, 'vprint') else lambda s: None;
        if 2 <= header['dump_type'] <= 3:
            if not test(kw, 'var'):
                var = [i[0] for i in header['quantities']];
            else:
                var = kw['var'];
        keep_edges = test(kw, 'keep_edges');
        first_sort = test(kw, 'first_sort');
        if test(kw, 'sort'):
            sort = kw['sort']
        else:
            sort = None;
        keep_xs = test(kw, 'keep_xs');
        return_array = test(kw, 'return_array');
        readers = {
            1: lambda: read_particles(file, header),
            2: lambda: read_flds(
                file, header, var, vprint,
                keep_edges=keep_edges,
                first_sort=first_sort,
                sort=sort,
                keep_xs=keep_xs,
                return_array=return_array),
            3: lambda: read_flds(
                file, header, var, vprint,
                keep_edges=keep_edges,
                first_sort=first_sort,
                sort=sort,
                keep_xs=keep_xs,
                return_array=return_array,
                vector=False),
            6: lambda: read_movie(file, header),
            10: lambda: read_pext(file, header)
        };
        try:
            d = readers[header['dump_type']]();
        except KeyError:
            raise NotImplementedError("Other file types not implemented yet!");
        return d;
def options(self, **options):
    """A context-manager for setting connection options; the original
    values of the options will be restored when the context-manager exits.
    For example::

        with c.options(gui_mode=False):
            c.cmd.vol_list()
    """
    self._contexts.append(self._contexts[-1].copy())
    self.set_options(**options)
    try:
        yield
    finally:
        self._contexts.pop(-1)
def state(name):
    '''
    Return state of container (running or stopped)

    CLI Example:

    .. code-block:: bash

        salt myminion nspawn.state <name>
    '''
    try:
        cmd = 'show {0} --property=State'.format(name)
        return _machinectl(cmd, ignore_retcode=True)['stdout'].split('=')[-1]
    except IndexError:
        return 'stopped'
def _remove_extraneous_xml_declarations(xml_str):
    """
    Sometimes devices return XML with more than one XML declaration in it, such as when
    returning their own XML config files. This removes the extra ones and preserves the
    first one.
    """
    xml_declaration = ''
    if xml_str.startswith('<?xml'):
        xml_declaration, xml_str = xml_str.split('?>', maxsplit=1)
        xml_declaration += '?>'
    xml_str = re.sub(r'<\?xml.*?\?>', '', xml_str, flags=re.I)
    return xml_declaration + xml_str
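Its behavior in one concrete case, using a self-contained copy of the function so the assertion actually runs:

import re

def remove_extra_decls(xml_str):
    xml_declaration = ''
    if xml_str.startswith('<?xml'):
        xml_declaration, xml_str = xml_str.split('?>', maxsplit=1)
        xml_declaration += '?>'
    return xml_declaration + re.sub(r'<\?xml.*?\?>', '', xml_str, flags=re.I)

doubled = '<?xml version="1.0"?><a><?xml version="1.0"?><b/></a>'
assert remove_extra_decls(doubled) == '<?xml version="1.0"?><a><b/></a>'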
def SetSize(self, rect):
    "Called to position/size the edit control within the cell rectangle."
    self._tc.SetDimensions(rect.x, rect.y, rect.width + 2, rect.height + 2,
                           wx.SIZE_ALLOW_MINUS_ONE)
def handle_exception(self, exc):
    """Use custom exception handler for errors."""
    if isinstance(
            exc, (rest_exceptions.NotAuthenticated,
                  rest_exceptions.AuthenticationFailed)) and self.HANDLE_UNAUTHENTICATED:
        return HttpResponseRedirect('{}?next={}'.format(
            reverse('users:login'),
            self.request.get_full_path()))
    if isinstance(exc, Http404):
        raise Http404()
    if isinstance(exc, rest_exceptions.PermissionDenied):
        raise django_exceptions.PermissionDenied()
    return super().handle_exception(exc)
def token(config, token):
    """Store and fetch a GitHub access token"""
    if not token:
        info_out(
            "To generate a personal API token, go to:\n\n\t"
            "https://github.com/settings/tokens\n\n"
            "To read more about it, go to:\n\n\t"
            "https://help.github.com/articles/creating-an-access"
            "-token-for-command-line-use/\n\n"
            'Remember to enable "repo" in the scopes.'
        )
        token = getpass.getpass("GitHub API Token: ").strip()
    url = urllib.parse.urljoin(config.github_url, "/user")
    assert url.startswith("https://"), url
    response = requests.get(url, headers={"Authorization": "token {}".format(token)})
    if response.status_code == 200:
        update(
            config.configfile,
            {
                "GITHUB": {
                    "github_url": config.github_url,
                    "token": token,
                    "login": response.json()["login"],
                }
            },
        )
        name = response.json()["name"] or response.json()["login"]
        success_out("Hi! {}".format(name))
    else:
        error_out("Failed - {} ({})".format(response.status_code, response.content))
def install_docs(instance, clear_target):
    """Builds and installs the complete HFOS documentation."""
    _check_root()

    def make_docs():
        """Trigger a Sphinx make command to build the documentation."""
        log("Generating HTML documentation")
        try:
            build = Popen(
                [
                    'make',
                    'html'
                ],
                cwd='docs/'
            )
            build.wait()
        except Exception as e:
            log("Problem during documentation building: ", e, type(e),
                exc=True, lvl=error)
            return False
        return True

    make_docs()

    # If these need changes, make sure they are watertight and don't remove
    # wanted stuff!
    target = os.path.join('/var/lib/hfos', instance, 'frontend/docs')
    source = 'docs/build/html'

    log("Updating documentation directory:", target)

    if not os.path.exists(os.path.join(os.path.curdir, source)):
        log(
            "Documentation does not exist yet. Run python setup.py "
            "build_sphinx first.", lvl=error)
        return

    if os.path.exists(target):
        log("Path already exists: " + target)
        if clear_target:
            log("Cleaning up " + target, lvl=warn)
            shutil.rmtree(target)

    log("Copying docs to " + target)
    copy_tree(source, target)
    log("Done: Install Docs")
def payload_unregister(klass, pid):
    """Is used while a hook is running to let Juju know
    that a payload has been manually stopped. The <class> and <id> provided
    must match a payload that has been previously registered with juju using
    payload-register."""
    cmd = ['payload-unregister']
    for x in [klass, pid]:
        cmd.append(x)
    subprocess.check_call(cmd)
def _norm_default(x):
"""Default Euclidean norm implementation."""
# Lazy import to improve `import odl` time
import scipy.linalg
if _blas_is_applicable(x.data):
nrm2 = scipy.linalg.blas.get_blas_funcs('nrm2', dtype=x.dtype)
norm = partial(nrm2, n=native(x.size))
else:
norm = np.linalg.norm
return norm(x.data.ravel())
|
Default Euclidean norm implementation.
|
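A standalone sketch of the BLAS lookup used above, cross-checking scipy's nrm2 routine against numpy (no odl context assumed):
import numpy as np
from scipy.linalg import blas
x = np.random.randn(1000)
nrm2 = blas.get_blas_funcs('nrm2', dtype=x.dtype)  # picks dnrm2 for float64
assert np.isclose(nrm2(x), np.linalg.norm(x))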
def get_subfolders(self):
"""Retrieve all child Folders inside of this Folder.
Raises:
AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
Returns:
List[:class:`Folder <pyOutlook.core.folder.Folder>`]
"""
headers = self.headers
endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/childfolders'
r = requests.get(endpoint, headers=headers)
if check_response(r):
return self._json_to_folders(self.account, r.json())
|
Retrieve all child Folders inside of this Folder.
Raises:
AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
Returns:
List[:class:`Folder <pyOutlook.core.folder.Folder>`]
|
def _on_stackexchange_user(self, future, access_token, response):
"""Invoked as a callback when self.stackexchange_request returns the
response to the request for user data.
:param method future: The callback method to pass along
:param str access_token: The access token for the user's use
:param dict response: The HTTP response already decoded
"""
response['access_token'] = access_token
future.set_result(response)
|
Invoked as a callback when self.stackexchange_request returns the
response to the request for user data.
:param method future: The callback method to pass along
:param str access_token: The access token for the user's use
:param dict response: The HTTP response already decoded
|
def cols_strip(df,col_list, dest = False):
""" Performs str.strip() a column of a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_list - list of strings
names of columns to strip
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
"""
if not dest:
return _pd.DataFrame({col_name:col_strip(df,col_name) for col_name in col_list})
for col_name in col_list:
col_strip(df,col_name,dest)
|
Performs str.strip() on columns of a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_list - list of strings
names of columns to strip
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
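A minimal usage sketch, assuming the host module's col_strip(df, name) returns the stripped column and pandas is available:
import pandas as pd
df = pd.DataFrame({'a': [' x ', ' y'], 'b': ['1 ', ' 2']})
stripped = cols_strip(df, ['a', 'b'])   # dest=False: returns a new DataFrame
cols_strip(df, ['a', 'b'], dest=True)   # dest=True: applies in place, returns None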
def _yum_pkginfo(output):
'''
Parse yum/dnf output (which could contain irregular line breaks if package
names are long) retrieving the name, version, etc., and return a list of
pkginfo namedtuples.
'''
cur = {}
keys = itertools.cycle(('name', 'version', 'repoid'))
values = salt.utils.itertools.split(_strip_headers(output))
osarch = __grains__['osarch']
for (key, value) in zip(keys, values):
if key == 'name':
try:
cur['name'], cur['arch'] = value.rsplit('.', 1)
except ValueError:
cur['name'] = value
cur['arch'] = osarch
cur['name'] = salt.utils.pkg.rpm.resolve_name(cur['name'],
cur['arch'],
osarch)
else:
if key == 'version':
# Support packages with no 'Release' parameter
value = value.rstrip('-')
elif key == 'repoid':
# Installed packages show a '@' at the beginning
value = value.lstrip('@')
cur[key] = value
if key == 'repoid':
# We're done with this package, create the pkginfo namedtuple
pkginfo = salt.utils.pkg.rpm.pkginfo(**cur)
# Clear the dict for the next package
cur = {}
# Yield the namedtuple
if pkginfo is not None:
yield pkginfo
|
Parse yum/dnf output (which could contain irregular line breaks if package
names are long) retrieving the name, version, etc., and return a list of
pkginfo namedtuples.
|
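The parse above cycles a fixed key sequence against a flat token stream; a self-contained sketch of that pattern (no salt imports) could look like:
import itertools
def parse_records(tokens, fields=('name', 'version', 'repoid')):
    keys = itertools.cycle(fields)
    cur = {}
    for key, value in zip(keys, tokens):
        cur[key] = value
        if key == fields[-1]:  # last field closes the record
            yield dict(cur)
            cur = {}
print(list(parse_records(['pkgA.x86_64', '1.0-1', '@base'])))
# [{'name': 'pkgA.x86_64', 'version': '1.0-1', 'repoid': '@base'}]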
def print_log(value_color="", value_noncolor=""):
"""set the colors for text."""
HEADER = '\033[92m'
ENDC = '\033[0m'
print(HEADER + value_color + ENDC + str(value_noncolor))
|
Print ``value_color`` in green followed by ``value_noncolor``.
|
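Usage sketch (the escape codes above are standard ANSI, so the color only shows on ANSI-capable terminals):
print_log("PASS: ", "all 12 checks completed")  # "PASS: " prints in green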
def get_group_filters(self):
"""Return es OR filters to include all special coverage group conditions."""
group_filters = []
field_map = {
"feature-type": "feature_type.slug",
"tag": "tags.slug",
"content-type": "_type"
}
for group_set in self.query.get("groups", []):
for group in group_set:
group_filter = es_filter.MatchAll()
for condition in group.get("conditions", []):
group_filter &= get_condition_filter(condition, field_map=field_map)
group_filters.append(group_filter)
return group_filters
|
Return es OR filters to include all special coverage group conditions.
|
def axes(self):
"""
Returns all the axes that have been defined for this chart.
:return [<projexui.widgets.xchart.XChartAxis>, ..]
"""
out = self._axes[:]
if self._horizontalAxis:
out.append(self._horizontalAxis)
if self._verticalAxis:
out.append(self._verticalAxis)
return out
|
Returns all the axes that have been defined for this chart.
:return [<projexui.widgets.xchart.XChartAxis>, ..]
|
def _get_placeholders(sql_statement, parameters):
"""
Retrieve the list of placeholders and their type defined in an SQL
statement.
@param sql_statement: a parameterized statement.
@param parameters: the list of parameters used in the SQL statement.
@return: a dictionary of placeholders where the key represents the
name of a placeholder, the value corresponds to a tuple::
(``type:PlaceholderType``, ``value``)
where :
* ``type``: type of the placeholder
* ``value``: value to replace the placeholder.
"""
# Find the list of placeholders, and their type, defined in the SQL
# statement.
placeholders = {}
try:
for match in REGEX_PATTERN_SQL_PLACEHOLDERS.findall(sql_statement):
for (i, placeholder_type) in enumerate(PlaceholderType._values):
placeholder_name = match[i]
if placeholder_name:
placeholder_value = parameters[placeholder_name]
# A nested-list placeholder must be bound to a list-like value.
if placeholder_type == PlaceholderType.nested_list \
and not isinstance(placeholder_value, (list, set, tuple)):
raise ValueError('The value to replace the placeholder "%s" is not a list as expected' % placeholder_name)
placeholders[placeholder_name] = (placeholder_type, placeholder_value)
break
except KeyError:
raise ValueError('The placeholder %s has no corresponding parameter' % placeholder_name)
# Check whether all the specified parameters have their corresponding
# placeholder in the SQL statement.
undefined_placeholders = [ parameter for parameter in parameters.iterkeys()
if parameter not in placeholders ]
if undefined_placeholders:
raise ValueError('The placeholders %s are missing from the extended pyformat SQL statement\n%s' \
% (', '.join([ '"%s"' % _ for _ in undefined_placeholders ]), sql_statement))
return placeholders
|
Retrieve the list of placeholders and their type defined in an SQL
statement.
@param sql_statement: a parameterized statement.
@param parameters: the list of parameters used in the SQL statement.
@return: a dictionary of placeholders where the key represents the
name of a placeholder, the value corresponds to a tuple::
(``type:PlaceholderType``, ``value``)
where :
* ``type``: type of the placeholder
* ``value``: value to replace the placeholder.
|
def isIsosceles(self):
'''
True iff two side lengths are equal, boolean.
'''
return (self.a == self.b) or (self.a == self.c) or (self.b == self.c)
|
True iff two side lengths are equal, boolean.
|
def match1(pattern, data, **parse_kwargs):
"""Returns first matched value of pattern in data or None if no matches"""
matches = match(pattern, data, **parse_kwargs)
return matches[0] if matches else None
|
Returns first matched value of pattern in data or None if no matches
|
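A hedged usage sketch, assuming the host module's match(pattern, data) returns a list of captured values:
first_title = match1(r'<title>(.*?)</title>', '<title>a</title><title>b</title>')
# -> 'a'; returns None when nothing matches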
def _sanity_check_construct_result_block(ir_blocks):
"""Assert that ConstructResult is always the last block, and only the last block."""
if not isinstance(ir_blocks[-1], ConstructResult):
raise AssertionError(u'The last block was not ConstructResult: {}'.format(ir_blocks))
for block in ir_blocks[:-1]:
if isinstance(block, ConstructResult):
raise AssertionError(u'Found ConstructResult before the last block: '
u'{}'.format(ir_blocks))
|
Assert that ConstructResult is always the last block, and only the last block.
|
def enumerate_chunks (phrase, spacy_nlp):
"""
iterate through the noun phrases
"""
if (len(phrase) > 1):
found = False
text = " ".join([rl.text for rl in phrase])
doc = spacy_nlp(text.strip(), parse=True)
for np in doc.noun_chunks:
if np.text != text:
found = True
yield np.text, find_chunk(phrase, np.text.split(" "))
if not found and all([rl.pos[0] != "v" for rl in phrase]):
yield text, phrase
|
iterate through the noun phrases
|
def update(self):
"""This method should be called to update associated Posts
It will call content-specific methods:
_get_data() to obtain list of entries
_store_post() to store obtained entry object
_get_data_source_url() to get an URL to identify Posts from this Data Source
"""
#get the raw data
# self.posts.all().delete() # TODO: handle in update_posts if source changes without deleting every time
data = self._get_data()
#iterate through them and for each item
msg = []
for entry in data:
link = self._get_entry_link(entry)
stored_entry, is_new = Post.objects.get_or_create(link=link)
self._store_post(stored_entry, entry)
if is_new is True:
#self._set_dates(stored_entry)
# self._store_post(stored_entry, entry)
msg.append('Post "%s" added.' % stored_entry.link)
else:
msg.append('Post "%s" already saved.' % stored_entry.link)
self.updated = utils.get_datetime_now()
self.save(no_signals=True)
return '<br />'.join(msg)
|
This method should be called to update associated Posts
It will call content-specific methods:
_get_data() to obtain list of entries
_store_post() to store obtained entry object
_get_data_source_url() to get an URL to identify Posts from this Data Source
|
def canonicalize_path(cwd, path):
"""
Canonicalizes a path relative to a given working directory. That
is, the path, if not absolute, is interpreted relative to the
working directory, then converted to absolute form.
:param cwd: The working directory.
:param path: The path to canonicalize.
:returns: The absolute path.
"""
if not os.path.isabs(path):
path = os.path.join(cwd, path)
return os.path.abspath(path)
|
Canonicalizes a path relative to a given working directory. That
is, the path, if not absolute, is interpreted relative to the
working directory, then converted to absolute form.
:param cwd: The working directory.
:param path: The path to canonicalize.
:returns: The absolute path.
|
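Two quick calls illustrating both branches:
canonicalize_path('/home/user', 'docs/../notes.txt')  # -> '/home/user/notes.txt'
canonicalize_path('/home/user', '/etc/hosts')         # -> '/etc/hosts' (already absolute)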
def to_repr(value, ctx):
"""
Converts a value back to its representation form, e.g. x -> "x"
"""
as_string = to_string(value, ctx)
if isinstance(value, str) or isinstance(value, datetime.date) or isinstance(value, datetime.time):
as_string = as_string.replace('"', '""') # escape quotes by doubling
as_string = '"%s"' % as_string
return as_string
|
Converts a value back to its representation form, e.g. x -> "x"
|
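An illustration of the quote-doubling rule, assuming to_string() returns the plain text of a str value (ctx is whatever context object the host module passes around):
# to_repr('say "hi"', ctx)  ->  '"say ""hi"""'
# to_repr(42, ctx)          ->  '42'  (non-string, non-date values stay unquoted)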
def simplex_summation_matrix(simplices, weight=None, inverse=False):
'''
simplex_summation_matrix(mtx) yields a scipy sparse matrix that, when dotted with a
column vector of length m (where m is the number of simplices described in the simplex matrix,
mtx), yields a vector of length n (where n is the number of vertices in the simplex mesh); the
returned vector is the sum, over each vertex, of the faces to which it belongs.
The matrix mtx must be oriented such that the first dimension (rows) corresponds to the vertices
of the simplices and the second dimension (columns) corresponds to simplices themselves.
The optional argument weight may specify a weight for each face, in which case the summation is
a weighted sum instead of a flat sum.
The optional argument inverse=True may be given to indicate that the inverse summation matrix
(summation of the vertices onto the simplices) should be returned.
'''
simplices = np.asarray(simplices)
n = np.max(simplices) + 1
(d,m) = simplices.shape
rng = range(m)
if inverse:
if weight is None: f = sps.csr_matrix
else:
nrng = range(n)
ww = sps.csr_matrix((weight, (nrng, nrng)), shape=(n,n), dtype=np.float)
f = lambda *args,**kwargs: ww.dot(sps.csc_matrix(*args,**kwargs))
s = f((np.ones(d*m, dtype=np.int),
(np.concatenate([rng for _ in range(d)]), np.concatenate(simplices))),
shape=(m,n),
dtype=np.int)
else:
s = sps.csr_matrix(
(np.ones(d*m, dtype=np.int),
(np.concatenate(simplices), np.concatenate([rng for _ in range(d)]))),
shape=(n,m),
dtype=np.int)
if weight is not None:
s = s.dot(sps.csc_matrix((weight, (rng, rng)), shape=(m,m), dtype=np.float))
return s
|
simplex_summation_matrix(mtx) yields a scipy sparse matrix that, when dotted with a
column vector of length m (where m is the number of simplices described in the simplex matrix,
mtx), yields a vector of length n (where n is the number of vertices in the simplex mesh); the
returned vector is the sum, over each vertex, of the faces to which it belongs.
The matrix mtx must be oriented such that the first dimension (rows) corresponds to the vertices
of the simplices and the second dimension (columns) corresponds to simplices themselves.
The optional argument weight may specify a weight for each face, in which case the summation is
a weighted sum instead of a flat sum.
The optional argument inverse=True may be given to indicate that the inverse summation matrix
(summation of the vertices onto the simplices) should be returned.
|
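A small worked example with two triangles sharing the edge (1, 2); note the (d, m) orientation, vertices along rows and simplices along columns (assumes a NumPy old enough to still provide the np.int/np.float aliases the code above uses):
import numpy as np
simplices = np.array([[0, 1],
                      [1, 2],
                      [2, 3]])             # columns: faces (0,1,2) and (1,2,3)
S = simplex_summation_matrix(simplices)    # sparse, shape (4 vertices, 2 faces)
print(S.dot(np.array([10.0, 20.0])))       # [10. 30. 30. 20.]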
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the filter's second-order sections in the z-plane
"""
iir_d.sos_zplane(self.sos,auto_scale,size,tol)
|
Plot the poles and zeros of the filter's second-order sections in the z-plane
|
def set_server_key(self, zmq_socket, server_secret_key_path):
'''must call before bind'''
load_and_set_key(zmq_socket, server_secret_key_path)
zmq_socket.curve_server = True
|
must call before bind
|
def reset_options(self, empty=True):
"""Empty ALL options.
:param bool empty: When :data:`True`, completely removes all options;
when :data:`False`, resets them to their original values.
This function bypasses the ``locked`` control.
"""
if empty:
self.gc = pd.DataFrame(columns=self.clmn)
else:
self.gc["value"] = self.gc["default"]
|
Empty ALL options.
:param bool empty: When :data:`True`, completely removes all options;
when :data:`False`, resets them to their original values.
This function bypasses the ``locked`` control.
|
def get_potential_markables(docgraph):
"""
returns a list of all NPs and PPs in the given docgraph.
Parameters
----------
docgraph : DiscourseDocumentGraph
a document graph that (at least) contains syntax trees
(imported from Tiger XML files)
Returns
-------
potential_markables : list of str or int
Node IDs of all nodes that represent an NP/PP syntactical category/phrase
in the input document. If an NP is embedded in a PP, only the node
ID of the PP is returned.
"""
potential_markables = []
for node_id, nattr in dg.select_nodes_by_layer(docgraph, 'tiger:syntax', data=True):
if nattr['tiger:cat'] == 'NP':
# if an NP is embedded into a PP, only print the PP
pp_parent = False
for source, target in docgraph.in_edges(node_id):
parent_node = docgraph.node[source]
if 'tiger:cat' in parent_node and parent_node['tiger:cat'] == 'PP':
potential_markables.append(source) # add parent PP phrase
pp_parent = True
if not pp_parent:
potential_markables.append(node_id) # add NP phrase
elif nattr['tiger:cat'] == 'PP':
potential_markables.append(node_id) # add PP phrase
return potential_markables
|
returns a list of all NPs and PPs in the given docgraph.
Parameters
----------
docgraph : DiscourseDocumentGraph
a document graph that (at least) contains syntax trees
(imported from Tiger XML files)
Returns
-------
potential_markables : list of str or int
Node IDs of all nodes that represent an NP/PP syntactical category/phrase
in the input document. If an NP is embedded in a PP, only the node
ID of the PP is returned.
|
def checkIfRemoteIsNewer(self, localfile, remote_size, remote_modify):
"""
Overrides checkIfRemoteIsNewer in Source class
:param localfile: str file path
:param remote_size: str bytes
:param remote_modify: str last modify date in the form 20160705042714
:return: boolean True if remote file is newer else False
"""
is_remote_newer = False
status = os.stat(localfile)
LOG.info(
"\nLocal file size: %i"
"\nLocal Timestamp: %s",
status[ST_SIZE], datetime.fromtimestamp(status.st_mtime))
remote_dt = Bgee._convert_ftp_time_to_iso(remote_modify)
if remote_dt != datetime.fromtimestamp(status.st_mtime) or \
status[ST_SIZE] != int(remote_size):
is_remote_newer = True
LOG.info(
"Object on server is has different size %i and/or date %s",
remote_size, remote_dt)
return is_remote_newer
|
Overrides checkIfRemoteIsNewer in Source class
:param localfile: str file path
:param remote_size: str bytes
:param remote_modify: str last modify date in the form 20160705042714
:return: boolean True if remote file is newer else False
|
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
"""
List the set of available GPG keys tagged for a given application.
Return list of {'keyName': key name, 'contentUrl': URL to key data}
Raise on error
"""
raise Exception("BROKEN; depends on list_mutable_data")
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
if proxy is None:
proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
key_info = []
key_prefix = "gpg.%s." % appname
# immutable data key listing (look for keys that start with 'gpg.<appname>.')
immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
if 'error' in immutable_listing:
raise Exception("Blockstack error: %s" % immutable_listing['error'])
for immutable in immutable_listing['data']:
name = immutable['data_id']
data_hash = immutable['hash']
if name.startswith( key_prefix ):
key_info.append( {
'keyName': name[len(key_prefix):],
'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
})
# mutable data key listing (look for keys that start with 'gpg.<appname>.')
# TODO: use 'accounts'
mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in mutable_listing:
raise Exception("Blockstack error: %s" % mutable_listing['error'])
for mutable in mutable_listing['data']:
name = mutable['data_id']
version = mutable['version']
if name.startswith( key_prefix ):
key_info.append( {
'keyName': name[len(key_prefix):],
'contentUrl': make_mutable_data_url( blockchain_id, name, version )
})
return key_info
|
List the set of available GPG keys tagged for a given application.
Return list of {'keyName': key name, 'contentUrl': URL to key data}
Raise on error
|
def encode_offset_commit_request(cls, client_id, correlation_id,
group, group_generation_id, consumer_id,
payloads):
"""
Encode some OffsetCommitRequest structs (v1)
:param bytes client_id: string
:param int correlation_id: int
:param str group: the consumer group to which you are committing offsets
:param int group_generation_id: int32, generation ID of the group
:param str consumer_id: string, Identifier for the consumer
:param list payloads: list of :class:`OffsetCommitRequest`
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(
client_id, correlation_id, KafkaCodec.OFFSET_COMMIT_KEY,
api_version=1,
)
message += write_short_ascii(group)
message += struct.pack('>i', group_generation_id)
message += write_short_ascii(consumer_id)
message += struct.pack('>i', len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_ascii(topic)
message += struct.pack('>i', len(topic_payloads))
for partition, payload in topic_payloads.items():
message += struct.pack('>iqq', partition, payload.offset,
payload.timestamp)
message += write_short_bytes(payload.metadata)
return message
|
Encode some OffsetCommitRequest structs (v1)
:param bytes client_id: string
:param int correlation_id: int
:param str group: the consumer group to which you are committing offsets
:param int group_generation_id: int32, generation ID of the group
:param str consumer_id: string, Identifier for the consumer
:param list payloads: list of :class:`OffsetCommitRequest`
|
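The wire framing above is plain big-endian struct packing; a tiny standalone illustration of the format codes used ('>i' is int32, '>q' is int64):
import struct
count = struct.pack('>i', 2)              # 4-byte big-endian int32
entry = struct.pack('>iqq', 0, 1024, -1)  # partition, offset, timestamp
print(len(count), len(entry))             # 4 20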
def from_xml_node(xml_node):
"""constructs a CLI.Parameter from an xml node.
:param xml_node:
:type xml_node: xml.etree.ElementTree.Element
:rtype: Executable.Parameter
:return:
"""
def gather_enum_values():
l = []
for element in xml_node.iterfind('element'):
l.append(element.text)
return l
name = xml_node.findtext("name")
type = xml_node.tag
if type in ("label", "description"): return None
default = xml_node.findtext("default")
longflag = xml_node.findtext('longflag')
if default:
default = default.replace('"', '').replace("'", '')
index = xml_node.findtext('index')
label = xml_node.findtext('label') or name or longflag
doc = xml_node.findtext('description')
values = gather_enum_values()
channel = xml_node.findtext('channel')
file_ext = xml_node.attrib.get('fileExtensions', None)
return Parameter(name, type, default, doc, channel, values=values, index=index, label=label,
longflag=longflag, file_ext=file_ext)
|
constructs a CLI.Parameter from an xml node.
:param xml_node:
:type xml_node: xml.etree.ElementTree.Element
:rtype: Executable.Parameter
:return:
|
def add_filter(self, filter_, frequencies=None, dB=True,
analog=False, sample_rate=None, **kwargs):
"""Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
analog : `bool`, optional
if `True`, treat the filter as analogue, default: `False`
sample_rate : `float`, optional
sample rate (in Hertz) of a digital filter, required when
``analog=False``
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter.
"""
if not analog:
if not sample_rate:
raise ValueError("Must give sample_rate frequency to display "
"digital (analog=False) filter")
sample_rate = Quantity(sample_rate, 'Hz').value
dt = 2 * pi / sample_rate
if not isinstance(frequencies, (type(None), int)):
frequencies = numpy.atleast_1d(frequencies).copy()
frequencies *= dt
# parse filter (without digital conversions)
_, fcomp = parse_filter(filter_, analog=False)
if analog:
lti = signal.lti(*fcomp)
else:
lti = signal.dlti(*fcomp, dt=dt)
# calculate frequency response
w, mag, phase = lti.bode(w=frequencies)
# convert from decibels
if not dB:
mag = 10 ** (mag / 10.)
# draw
mline = self.maxes.plot(w, mag, **kwargs)[0]
pline = self.paxes.plot(w, phase, **kwargs)[0]
return mline, pline
|
Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
analog : `bool`, optional
if `True`, treat the filter as analogue, default: `False`
sample_rate : `float`, optional
sample rate (in Hertz) of a digital filter, required when
``analog=False``
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter.
|
def info(name, location='\\'):
r'''
Get the details about a task in the task scheduler.
:param str name: The name of the task for which to return the status
:param str location: A string value representing the location of the task.
Default is '\\' which is the root for the task scheduler
(C:\Windows\System32\tasks).
:return:
:rtype: dict
CLI Example:
.. code-block:: bash
salt 'minion-id' task.info <task_name>
'''
# Check that the task exists
if name not in list_tasks(location):
return '{0} not found in {1}'.format(name, location)
# connect to the task scheduler
with salt.utils.winapi.Com():
task_service = win32com.client.Dispatch("Schedule.Service")
task_service.Connect()
# get the folder containing the task
task_folder = task_service.GetFolder(location)
task = task_folder.GetTask(name)
properties = {'enabled': task.Enabled,
'last_run': _get_date_value(task.LastRunTime),
'last_run_result': show_win32api_code(task.LastTaskResult),
'missed_runs': task.NumberOfMissedRuns,
'next_run': _get_date_value(task.NextRunTime),
'status': states[task.State]}
def_set = task.Definition.Settings
settings = {}
settings['allow_demand_start'] = def_set.AllowDemandStart
settings['force_stop'] = def_set.AllowHardTerminate
if def_set.DeleteExpiredTaskAfter == '':
settings['delete_after'] = False
elif def_set.DeleteExpiredTaskAfter == 'PT0S':
settings['delete_after'] = 'Immediately'
else:
settings['delete_after'] = _reverse_lookup(duration, def_set.DeleteExpiredTaskAfter)
if def_set.ExecutionTimeLimit == '':
settings['execution_time_limit'] = False
else:
settings['execution_time_limit'] = _reverse_lookup(duration, def_set.ExecutionTimeLimit)
settings['multiple_instances'] = _reverse_lookup(instances, def_set.MultipleInstances)
if def_set.RestartInterval == '':
settings['restart_interval'] = False
else:
settings['restart_interval'] = _reverse_lookup(duration, def_set.RestartInterval)
if settings['restart_interval']:
settings['restart_count'] = def_set.RestartCount
settings['stop_if_on_batteries'] = def_set.StopIfGoingOnBatteries
settings['wake_to_run'] = def_set.WakeToRun
conditions = {}
conditions['ac_only'] = def_set.DisallowStartIfOnBatteries
conditions['run_if_idle'] = def_set.RunOnlyIfIdle
conditions['run_if_network'] = def_set.RunOnlyIfNetworkAvailable
conditions['start_when_available'] = def_set.StartWhenAvailable
if conditions['run_if_idle']:
idle_set = def_set.IdleSettings
conditions['idle_duration'] = idle_set.IdleDuration
conditions['idle_restart'] = idle_set.RestartOnIdle
conditions['idle_stop_on_end'] = idle_set.StopOnIdleEnd
conditions['idle_wait_timeout'] = idle_set.WaitTimeout
if conditions['run_if_network']:
net_set = def_set.NetworkSettings
conditions['network_id'] = net_set.Id
conditions['network_name'] = net_set.Name
actions = []
for actionObj in task.Definition.Actions:
action = {}
action['action_type'] = _reverse_lookup(action_types, actionObj.Type)
if actionObj.Path:
action['cmd'] = actionObj.Path
if actionObj.Arguments:
action['arguments'] = actionObj.Arguments
if actionObj.WorkingDirectory:
action['working_dir'] = actionObj.WorkingDirectory
actions.append(action)
triggers = []
for triggerObj in task.Definition.Triggers:
trigger = {}
trigger['trigger_type'] = _reverse_lookup(trigger_types, triggerObj.Type)
if triggerObj.ExecutionTimeLimit:
trigger['execution_time_limit'] = _reverse_lookup(duration, triggerObj.ExecutionTimeLimit)
if triggerObj.StartBoundary:
start_date, start_time = triggerObj.StartBoundary.split('T', 1)
trigger['start_date'] = start_date
trigger['start_time'] = start_time
if triggerObj.EndBoundary:
end_date, end_time = triggerObj.EndBoundary.split('T', 1)
trigger['end_date'] = end_date
trigger['end_time'] = end_time
trigger['enabled'] = triggerObj.Enabled
if hasattr(triggerObj, 'RandomDelay'):
if triggerObj.RandomDelay:
trigger['random_delay'] = _reverse_lookup(duration, triggerObj.RandomDelay)
else:
trigger['random_delay'] = False
if hasattr(triggerObj, 'Delay'):
if triggerObj.Delay:
trigger['delay'] = _reverse_lookup(duration, triggerObj.Delay)
else:
trigger['delay'] = False
triggers.append(trigger)
properties['settings'] = settings
properties['conditions'] = conditions
properties['actions'] = actions
properties['triggers'] = triggers
ret = properties
return ret
|
r'''
Get the details about a task in the task scheduler.
:param str name: The name of the task for which to return the status
:param str location: A string value representing the location of the task.
Default is '\\' which is the root for the task scheduler
(C:\Windows\System32\tasks).
:return:
:rtype: dict
CLI Example:
.. code-block:: bash
salt 'minion-id' task.info <task_name>
|
def fetch_withdrawals(self, limit: int) -> List[Withdrawal]:
"""Fetch latest withdrawals, must provide a limit."""
return self._transactions(self._withdrawals, 'withdrawals', limit)
|
Fetch latest withdrawals, must provide a limit.
|
def set_value(self, value, layer=None, source=None):
"""Set a value for a particular layer with optional metadata about source.
Parameters
----------
value : str
Data to store in the node.
layer : str
Name of the layer to use. If None then the outermost where the value
exists will be used.
source : str
Metadata indicating the source of this value (e.g. a file path)
Raises
------
TypeError
If the node is frozen
KeyError
If the named layer does not exist
"""
if self._frozen:
raise TypeError('Frozen ConfigNode does not support assignment')
if not layer:
layer = self._layers[-1]
self._values[layer] = (source, value)
|
Set a value for a particular layer with optional metadata about source.
Parameters
----------
value : str
Data to store in the node.
layer : str
Name of the layer to use. If None then the outermost where the value
exists will be used.
source : str
Metadata indicating the source of this value (e.g. a file path)
Raises
------
TypeError
If the node is frozen
KeyError
If the named layer does not exist
|
def brief_exception_text(exception, secret_values):
"""
Returns the Exception class and the message of the exception as string.
:param exception: The exception to format
:param secret_values: Values to hide in output
"""
exception_text = _hide_secret_values(str(exception), secret_values)
return '[{}]\n{}'.format(type(exception).__name__, exception_text)
|
Returns the Exception class and the message of the exception as string.
:param exception: The exception to format
:param secret_values: Values to hide in output
|
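Usage sketch, assuming _hide_secret_values masks each listed value in the message text:
try:
    raise ValueError("login failed for token s3cr3t")
except ValueError as exc:
    print(brief_exception_text(exc, ['s3cr3t']))
# [ValueError]
# login failed for token <masked>   (exact masking depends on _hide_secret_values)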
def json_path_components(path):
"""Convert JSON path to individual path components.
:param path: JSON path, which can be either an iterable of path
components or a dot-separated string
:return: A list of path components
"""
if isinstance(path, str):
path = path.split('.')
return list(path)
|
Convert JSON path to individual path components.
:param path: JSON path, which can be either an iterable of path
components or a dot-separated string
:return: A list of path components
|
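Both accepted input forms:
json_path_components('a.b.c')      # -> ['a', 'b', 'c']
json_path_components(['a', 'b'])   # -> ['a', 'b']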
def add(self, variant, arch, image):
"""
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
"""
if arch not in productmd.common.RPM_ARCHES:
raise ValueError("Arch not found in RPM_ARCHES: %s" % arch)
if arch in ["src", "nosrc"]:
raise ValueError("Source arch is not allowed. Map source files under binary arches.")
if self.header.version_tuple >= (1, 1):
# disallow adding a different image with same 'unique'
# attributes. can't do this pre-1.1 as we couldn't truly
# identify images before subvariant
for checkvar in self.images:
for checkarch in self.images[checkvar]:
for curimg in self.images[checkvar][checkarch]:
if identify_image(curimg) == identify_image(image) and curimg.checksums != image.checksums:
raise ValueError("Image {0} shares all UNIQUE_IMAGE_ATTRIBUTES with "
"image {1}! This is forbidden.".format(image, curimg))
self.images.setdefault(variant, {}).setdefault(arch, set()).add(image)
|
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
|
def remove_adapter(widget_class, flavour=None):
"""Removes the given widget class information from the default set
of adapters.
If widget_class had been previously added by using add_adapter,
the added adapter will be removed, restoring possibly previously
existing adapter(s). Notice that this function will remove only
*one* adapter for the given widget_class (the first found in order),
even if many are currently stored.
@param flavour has to be used when the entry was added with a
particular flavour.
Returns True if one adapter was removed, False if no adapter was
removed."""
for it,tu in enumerate(__def_adapter):
if (widget_class == tu[WIDGET] and flavour == tu[FLAVOUR]):
del __def_adapter[it]
return True
return False
|
Removes the given widget class information from the default set
of adapters.
If widget_class had been previously added by using add_adapter,
the added adapter will be removed, restoring possibly previously
existing adapter(s). Notice that this function will remove only
*one* adapter for the given widget_class (the first found in order),
even if many are currently stored.
@param flavour has to be used when the entry was added with a
particular flavour.
Returns True if one adapter was removed, False if no adapter was
removed.
|
def createmergerequest(self, project_id, sourcebranch, targetbranch,
title, target_project_id=None, assignee_id=None):
"""
Create a new merge request.
:param project_id: ID of the project originating the merge request
:param sourcebranch: name of the branch to merge from
:param targetbranch: name of the branch to merge to
:param title: Title of the merge request
:param target_project_id: ID of the target project
:param assignee_id: Assignee user ID
:return: dict of the new merge request
"""
data = {
'source_branch': sourcebranch,
'target_branch': targetbranch,
'title': title,
'assignee_id': assignee_id,
'target_project_id': target_project_id
}
request = requests.post(
'{0}/{1}/merge_requests'.format(self.projects_url, project_id),
data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return request.json()
else:
return False
|
Create a new merge request.
:param project_id: ID of the project originating the merge request
:param sourcebranch: name of the branch to merge from
:param targetbranch: name of the branch to merge to
:param title: Title of the merge request
:param target_project_id: ID of the target project
:param assignee_id: Assignee user ID
:return: dict of the new merge request
|
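A hedged call sketch; gl (a wrapper instance), the project ID and branch names are all illustrative:
mr = gl.createmergerequest(42, 'feature/x', 'master', 'Add feature X',
                           assignee_id=7)
if mr:
    print(mr.get('id'))   # dict of the created merge request, else False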
def clear_priority(self):
"""Removes the priority.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.logging.LogEntryForm.clear_priority_template
if (self.get_priority_metadata().is_read_only() or
self.get_priority_metadata().is_required()):
raise errors.NoAccess()
self._my_map['priority'] = self._priority_default
|
Removes the priority.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
|
def _render(self, contexts, partials):
"""render variable"""
value = self._lookup(self.value, contexts)
# lambda
if callable(value):
value = inner_render(str(value()), contexts, partials)
return self._escape(value)
|
render variable
|
def items(self):
"""Return list of tuples of keys and values in db
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc.items()
[('l0', ['1', '2', '3', '4'])]
>>> dc.clear()
:return: list of (key, value) pairs
:rtype: list of tuple
"""
logger.debug('call items')
return [(key, self.get(key)) for key in self.keys()]
|
Return list of tuples of keys and values in db
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc.items()
[('l0', ['1', '2', '3', '4'])]
>>> dc.clear()
:return: list of (key, value) pairs
:rtype: list of tuple
|
def parse_file(file):
"""
Take an open file containing the IANA subtag registry, and yield a
dictionary of information for each subtag it describes.
"""
lines = []
for line in file:
line = line.rstrip('\n')
if line == '%%':
# This is a separator between items. Parse the data we've
# collected and yield the result.
yield from parse_item(lines)
lines.clear()
elif line.startswith(' '):
# This is a continuation line. Concatenate it to the previous
# line, including one of the spaces.
lines[-1] += line[1:]
else:
lines.append(line)
yield from parse_item(lines)
|
Take an open file containing the IANA subtag registry, and yield a
dictionary of information for each subtag it describes.
|
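A sketch of the '%%'-separated record format this consumes, assuming the host module's parse_item(lines) turns field lines into a dict:
import io
registry = io.StringIO(
    "Type: language\nSubtag: en\nDescription: English\n"
    "%%\n"
    "Type: language\nSubtag: fr\nDescription: French\n"
)
for item in parse_file(registry):
    print(item)   # one dict per '%%'-separated record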
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
|
Are we at the end of a tuple?
|
def textFileStream(self, directory, process_all=False):
"""Monitor a directory and process all text files.
File names starting with ``.`` are ignored.
:param string directory: a path
:param bool process_all: whether to process pre-existing files
:rtype: DStream
.. warning::
The ``process_all`` parameter does not exist in the PySpark API.
"""
deserializer = FileTextStreamDeserializer(self._context)
file_stream = FileStream(directory, process_all)
self._on_stop_cb.append(file_stream.stop)
return DStream(file_stream, self, deserializer)
|
Monitor a directory and process all text files.
File names starting with ``.`` are ignored.
:param string directory: a path
:param bool process_all: whether to process pre-existing files
:rtype: DStream
.. warning::
The ``process_all`` parameter does not exist in the PySpark API.
|
def set_glitch_filter(self, user_gpio, steady):
"""
Sets a glitch filter on a GPIO.
Level changes on the GPIO are not reported unless the level
has been stable for at least [*steady*] microseconds. The
level is then reported. Level changes of less than [*steady*]
microseconds are ignored.
user_gpio:= 0-31
steady:= 0-300000
Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.
This filter affects the GPIO samples returned to callbacks set up
with [*callback*] and [*wait_for_edge*].
It does not affect levels read by [*read*],
[*read_bank_1*], or [*read_bank_2*].
Each (stable) edge will be timestamped [*steady*]
microseconds after it was first detected.
...
pi.set_glitch_filter(23, 100)
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_FG, user_gpio, steady)
return _u2i(res)
|
Sets a glitch filter on a GPIO.
Level changes on the GPIO are not reported unless the level
has been stable for at least [*steady*] microseconds. The
level is then reported. Level changes of less than [*steady*]
microseconds are ignored.
user_gpio:= 0-31
steady:= 0-300000
Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.
This filter affects the GPIO samples returned to callbacks set up
with [*callback*] and [*wait_for_edge*].
It does not affect levels read by [*read*],
[*read_bank_1*], or [*read_bank_2*].
Each (stable) edge will be timestamped [*steady*]
microseconds after it was first detected.
...
pi.set_glitch_filter(23, 100)
...
|
def cmd_list(self, argv, help):
"""Return a list of various things"""
parser = argparse.ArgumentParser(
prog="%s list" % self.progname,
description=help,
)
parser.add_argument("list", nargs=1,
metavar="listname",
help="Name of list to show.",
choices=sorted(self.list_cmds))
parser.add_argument("listopts",
metavar="...",
nargs=argparse.REMAINDER,
help="list command options")
args = parser.parse_args(argv)
for name, func in sorted(self.list_cmds[args.list[0]]):
func(args.listopts, func.__doc__)
|
Return a list of various things
|
def set_default_headers(context):
"""
:type context: behave.runner.Context
"""
headers = row_table(context)
def default_headers_function():
return headers
requestsdefaulter.default_headers(default_headers_function)
|
:type context: behave.runner.Context
|
def collate_revs(old, new, key=lambda x: x, merge=lambda old, new: new):
"""
Given revision sets old and new, each containing a series
of revisions of some set of objects, collate them based on
these rules:
- all items from each set are yielded in stable order
- items in old are yielded first
- items in new are yielded last
- items that match are yielded in the order in which they
appear, giving preference to new
Items match based on the 'key' parameter (identity by default).
Items are merged using the 'merge' function, which accepts the old
and new items to be merged (returning new by default).
This algorithm requires fully materializing both old and new in memory.
>>> rev1 = ['a', 'b', 'c']
>>> rev2 = ['a', 'd', 'c']
>>> result = list(collate_revs(rev1, rev2))
'd' must appear before 'c'
>>> result.index('d') < result.index('c')
True
'b' must appear before 'd' because it came chronologically
first.
>>> result.index('b') < result.index('d')
True
>>> result
['a', 'b', 'd', 'c']
>>> list(collate_revs(['a', 'b', 'c'], ['d']))
['a', 'b', 'c', 'd']
>>> list(collate_revs(['b', 'a'], ['a', 'b']))
['a', 'b']
>>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
['a', 'b', 'c']
Given two sequences of things out of order, regardless
of which order in which the items are merged, all
keys should always be merged.
>>> from more_itertools import consume
>>> left_items = ['a', 'b', 'c']
>>> right_items = ['a', 'c', 'b']
>>> consume(collate_revs(left_items, right_items, merge=print))
a a
c c
b b
>>> consume(collate_revs(right_items, left_items, merge=print))
a a
b b
c c
The merge should not suppress non-True items:
>>> consume(collate_revs([0, 1, 2, None, ''], [0, None, ''], merge=print))
None None
<BLANKLINE>
0 0
"""
missing = object()
def maybe_merge(*items):
"""
Merge any non-null items
"""
def not_missing(ob):
return ob is not missing
return functools.reduce(merge, filter(not_missing, items))
new_items = collections.OrderedDict(
(key(el), el)
for el in new
)
old_items = collections.OrderedDict(
(key(el), el)
for el in old
)
# use the old_items as a reference
for old_key, old_item in _mutable_iter(old_items):
if old_key not in new_items:
yield old_item
continue
# yield all new items that appear before the matching key
before, match_new, new_items = _swap_on_miss(
partition_dict(new_items, old_key))
for new_key, new_item in before.items():
# ensure any new keys are merged with previous items if
# they exist
yield maybe_merge(new_item, old_items.pop(new_key, missing))
yield merge(old_item, match_new)
# finally, yield whatever is leftover
# yield from new_items.values()
for item in new_items.values():
yield item
|
Given revision sets old and new, each containing a series
of revisions of some set of objects, collate them based on
these rules:
- all items from each set are yielded in stable order
- items in old are yielded first
- items in new are yielded last
- items that match are yielded in the order in which they
appear, giving preference to new
Items match based on the 'key' parameter (identity by default).
Items are merged using the 'merge' function, which accepts the old
and new items to be merged (returning new by default).
This algorithm requires fully materializing both old and new in memory.
>>> rev1 = ['a', 'b', 'c']
>>> rev2 = ['a', 'd', 'c']
>>> result = list(collate_revs(rev1, rev2))
'd' must appear before 'c'
>>> result.index('d') < result.index('c')
True
'b' must appear before 'd' because it came chronologically
first.
>>> result.index('b') < result.index('d')
True
>>> result
['a', 'b', 'd', 'c']
>>> list(collate_revs(['a', 'b', 'c'], ['d']))
['a', 'b', 'c', 'd']
>>> list(collate_revs(['b', 'a'], ['a', 'b']))
['a', 'b']
>>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
['a', 'b', 'c']
Given two sequences of things out of order, regardless
of which order in which the items are merged, all
keys should always be merged.
>>> from more_itertools import consume
>>> left_items = ['a', 'b', 'c']
>>> right_items = ['a', 'c', 'b']
>>> consume(collate_revs(left_items, right_items, merge=print))
a a
c c
b b
>>> consume(collate_revs(right_items, left_items, merge=print))
a a
b b
c c
The merge should not suppress non-True items:
>>> consume(collate_revs([0, 1, 2, None, ''], [0, None, ''], merge=print))
None None
<BLANKLINE>
0 0
|
def _update_history(self):
"""Update the history file"""
version = self.data['new_version']
history = self.vcs.history_file()
if not history:
logger.warn("No history file found")
return
history_lines = open(history).read().split('\n')
headings = utils.extract_headings_from_history(history_lines)
if not len(headings):
logger.warn("No detectable existing version headings in the "
"history file.")
inject_location = 0
underline_char = '-'
else:
first = headings[0]
inject_location = first['line']
underline_line = first['line'] + 1
try:
underline_char = history_lines[underline_line][0]
except IndexError:
logger.debug("No character on line below header.")
underline_char = '-'
header = '%s (unreleased)' % version
inject = [header,
underline_char * len(header),
'',
self.data['nothing_changed_yet'],
'',
'']
history_lines[inject_location:inject_location] = inject
contents = '\n'.join(history_lines)
open(history, 'w').write(contents)
logger.info("Injected new section into the history: %r", header)
|
Update the history file
|
def add(name, **kwargs):
'''
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
'''
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
|
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
|
def get_metadata(self):
"""
Provide metadata about this image.
:return: ImageMetadata, Image metadata instance
"""
if self._metadata is None:
self._metadata = ImageMetadata()
inspect_to_metadata(self._metadata, self.inspect(refresh=True))
return self._metadata
|
Provide metadata about this image.
:return: ImageMetadata, Image metadata instance
|
def LeaseCronJobs(self, cronjob_ids=None, lease_time=None):
"""Leases all available cron jobs."""
leased_jobs = []
now = rdfvalue.RDFDatetime.Now()
expiration_time = now + lease_time
for job in itervalues(self.cronjobs):
if cronjob_ids and job.cron_job_id not in cronjob_ids:
continue
existing_lease = self.cronjob_leases.get(job.cron_job_id)
if existing_lease is None or existing_lease[0] < now:
self.cronjob_leases[job.cron_job_id] = (expiration_time,
utils.ProcessIdString())
job = job.Copy()
job.leased_until, job.leased_by = self.cronjob_leases[job.cron_job_id]
leased_jobs.append(job)
return leased_jobs
|
Leases all available cron jobs.
|
def tool_click(self, evt):
"Event handler tool selection (just add to default handler)"
# get the control
ctrl = self.menu_ctrl_map[evt.GetId()]
# create the control on the parent:
if self.inspector.selected_obj:
# find the first parent drop target
parent = self.inspector.selected_obj
while parent.drop_target is None and parent.get_parent():
parent = parent.get_parent()
# create the new object
obj = ctrl(parent,
name="%s_%s" % (ctrl._meta.name.lower(), wx.NewId()),
pos=(0, 0), designer=self.designer)
# associate the object with the toolbox:
if obj._meta.container:
dt = ToolBoxDropTarget(obj, self.inspector.root_obj,
designer=self.designer,
inspector=self.inspector)
obj.drop_target = dt
# fix width and height if default is not visible
w, h = obj.size
if w <= 10:
obj.width = 100
if h <= 10:
obj.height = 20
# update the object at the inspector (to show the new control)
if self.inspector:
self.inspector.load_object(self.inspector.root_obj) # refresh tree
self.inspector.inspect(obj)
|
Event handler for tool selection (just adds to the default handler)
|
def get_revision():
"""
GET THE CURRENT GIT REVISION
"""
proc = Process("git log", ["git", "log", "-1"])
try:
while True:
line = proc.stdout.pop().strip().decode('utf8')
if not line:
continue
if line.startswith("commit "):
return line[7:]
finally:
with suppress_exception:
proc.join()
|
GET THE CURRENT GIT REVISION
|