Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
374,000
|
def authenticate(self, username, password):
    """Authenticate against the API via HTTP basic auth and cache the token.

    NOTE(review): several string literals in this block were stripped during
    extraction (empty subscripts/arguments below); the original lookup keys
    and messages cannot be recovered from this view.
    """
    # Remember the credentials used for this session.
    self._username = username
    self._password = password
    resp = requests.get(
        self._url,
        auth=(username, password),
        **self._default_request_kwargs
    )
    try:
        if resp.status_code == 200:
            json_data = resp.json()
            # NOTE(review): the nested lookup keys were lost in extraction.
            token = json_data[][]
        elif resp.status_code == 401:
            raise errors.AuthFailure(resp.json().get(, ))
        else:
            raise errors.AuthFailure(
                "Unknown exception while authenticating: ".format(resp.text)
            )
    except errors.AuthFailure:
        # Auth failures propagate unchanged.
        raise
    except Exception as ex:
        logging.exception(ex)
        raise errors.AuthFailure(.format(ex.__class__.__name__, ex))
    # Cache the token for subsequent token-protected requests.
    self._token = token
    logger.info(.format(token))
    return token
|
Authenticate against the ObjectRocket API.
:param str username: The username to perform basic authentication against the API with.
:param str password: The password to perform basic authentication against the API with.
:returns: A token used for authentication against token protected resources.
:rtype: str
|
374,001
|
def _request_toc_element(self, index):
    """Request information about a specific item in the TOC.

    NOTE(review): the logger.debug() format string was stripped during
    extraction (empty first argument below).
    """
    logger.debug(, index, self.port)
    pk = CRTPPacket()
    if self._useV2:
        # V2 protocol: index is sent as two little-endian bytes.
        pk.set_header(self.port, TOC_CHANNEL)
        pk.data = (CMD_TOC_ITEM_V2, index & 0x0ff, (index >> 8) & 0x0ff)
        self.cf.send_packet(pk, expected_reply=(
            CMD_TOC_ITEM_V2, index & 0x0ff, (index >> 8) & 0x0ff))
    else:
        # Legacy protocol: single-byte index.
        pk.set_header(self.port, TOC_CHANNEL)
        pk.data = (CMD_TOC_ELEMENT, index)
        self.cf.send_packet(pk, expected_reply=(CMD_TOC_ELEMENT, index))
|
Request information about a specific item in the TOC
|
374,002
|
def validate_template(template_body=None, template_url=None, region=None, key=None, keyid=None, profile=None):
    """Validate a CloudFormation template via boto.

    On BotoServerError the error text is returned as a string rather than
    raised. NOTE(review): the log-message template was stripped during
    extraction (bare .format() below).
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        return conn.validate_template(template_body, template_url)
    except BotoServerError as e:
        log.debug(e)
        msg = .format(template_body)
        log.error(msg)
        return six.text_type(e)
|
Validate cloudformation template
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.validate_template mystack-template
|
374,003
|
def setup(self, path=None):
    """Locate the SExtractor executable and determine its version.

    Returns (program, version). Raises SExtractorException when no working
    executable is found or the version cannot be parsed.

    NOTE(review): the default candidate names were stripped from the list
    literal below (per the docstring, originally 'sextractor' and 'sex').
    """
    candidates = [, ]
    if (path):
        # An explicit path restricts the search to that single candidate.
        candidates = [path]
    selected = None
    for candidate in candidates:
        try:
            # Probe the candidate by running it and reading its banner.
            p = subprocess.Popen(candidate, shell=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 close_fds=True)
            (_out_err, _in) = (p.stdout, p.stdin)
            versionline = _out_err.read()
            if (versionline.find("SExtractor") != -1):
                selected = candidate
                break
        except IOError:
            continue
    if not(selected):
        raise SExtractorException(
        )
    _program = selected
    # NOTE(review): "([0-9\.])+" repeats the group, not the class, so the
    # version is recovered below by slicing the whole match instead.
    _version_match = re.search("[Vv]ersion ([0-9\.])+", versionline)
    if not _version_match:
        raise SExtractorException(
            "Cannot determine SExtractor version."
        )
    # Skip the "[Vv]ersion " prefix (8 characters) of the match.
    _version = _version_match.group()[8:]
    if not _version:
        raise SExtractorException(
            "Cannot determine SExtractor version."
        )
    return _program, _version
|
Look for SExtractor program ('sextractor', or 'sex').
If a full path is provided, only this path is checked.
Raise a SExtractorException if it failed.
Return program and version if it succeed.
|
374,004
|
def parse_add_loopback():
    """Build an argparse Action validating loopback-adapter parameters.

    The returned Action class checks that values[1]/values[2] form a valid
    IPv4 address/netmask pair before storing the values on the namespace,
    raising argparse.ArgumentTypeError otherwise.
    """
    class Add(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            try:
                # Validation only; the parsed interface object is discarded.
                ipaddress.IPv4Interface("{}/{}".format(values[1], values[2]))
            except ipaddress.AddressValueError as err:
                raise argparse.ArgumentTypeError(
                    "Invalid IP address: {}".format(err))
            except ipaddress.NetmaskValueError as err:
                raise argparse.ArgumentTypeError(
                    "Invalid subnet mask: {}".format(err))
            setattr(args, self.dest, values)
    return Add
|
Validate params when adding a loopback adapter
|
374,005
|
def Stop(self):
    """Stop the emulator instance and remove its temporary directory.

    NOTE(review): log format strings, the header dict literal and the
    request URL/method literals were stripped during extraction.
    """
    if not self.__running:
        # Already stopped; nothing to do.
        return
    logging.info(, self._host)
    headers = {: }
    response, _ = self._http.request( % self._host,
                                     method=, headers=headers)
    if response.status != 200:
        # Best-effort shutdown: log but continue with cleanup.
        logging.warning(, response)
    self.__running = False
    shutil.rmtree(self._tmp_dir)
|
Stops the emulator instance.
|
374,006
|
def IsWalletTransaction(self, tx):
    """Return True if *tx* involves any wallet contract or watch-only hash.

    A transaction matches when an output pays to a contract's script hash,
    when a verification script equals a contract script, or when an output
    or verification script hashes to a watch-only script hash.
    """
    for _, contract in self._contracts.items():
        contract_hash = contract.ScriptHash.ToBytes()
        if any(out.ScriptHash.ToBytes() == contract_hash
               for out in tx.outputs):
            return True
        for script in tx.scripts:
            if script.VerificationScript:
                if bytes(contract.Script) == script.VerificationScript:
                    return True
    for watch_script_hash in self._watch_only:
        if any(out.ScriptHash == watch_script_hash for out in tx.outputs):
            return True
        for script in tx.scripts:
            if Crypto.ToScriptHash(script.VerificationScript,
                                   unhex=False) == watch_script_hash:
                return True
    return False
|
Verifies if a transaction belongs to the wallet.
Args:
tx (TransactionOutput):an instance of type neo.Core.TX.Transaction.TransactionOutput to verify.
Returns:
bool: True, if transaction belongs to wallet. False, if not.
|
374,007
|
def minimum(left, right):
    """Element-wise minimum of two Symbols and/or scalars (no broadcasting).

    NOTE(review): the TypeError message template was stripped during
    extraction (bare % expression below).
    """
    if isinstance(left, Symbol) and isinstance(right, Symbol):
        return _internal._Minimum(left, right)
    if isinstance(left, Symbol) and isinstance(right, Number):
        return _internal._MinimumScalar(left, scalar=right)
    if isinstance(left, Number) and isinstance(right, Symbol):
        # Minimum is symmetric, so reuse the scalar op with args swapped.
        return _internal._MinimumScalar(right, scalar=left)
    if isinstance(left, Number) and isinstance(right, Number):
        return left if left < right else right
    else:
        raise TypeError( % (str(type(left)), str(type(right))))
|
Returns element-wise minimum of the input elements.
Both inputs can be Symbol or scalar number. Broadcasting is not supported.
Parameters
---------
left : Symbol or scalar
First symbol to be compared.
right : Symbol or scalar
Second symbol to be compared.
Returns
-------
Symbol or scalar
The element-wise minimum of the input symbols.
Examples
--------
>>> mx.sym.minimum(2, 3.5)
2
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.minimum(x, 4)
>>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()
array([ 3., 4., 2., 4.], dtype=float32)
>>> z = mx.sym.minimum(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()
array([ 3., 2.], dtype=float32)
|
374,008
|
def xpnsl(h1, h2, use_threads=True):
    """Cross-population version of the NSL statistic.

    Runs forward and reverse NSL scans over both haplotype arrays (using a
    small thread pool when requested and multiple CPUs are present) and
    returns the unstandardized log-ratio score per variant.
    """
    # Validate and normalise the inputs.
    h1 = asarray_ndim(h1, 2)
    check_integer_dtype(h1)
    h2 = asarray_ndim(h2, 2)
    check_integer_dtype(h2)
    check_dim0_aligned(h1, h2)
    h1 = memoryview_safe(h1)
    h2 = memoryview_safe(h2)
    if use_threads and multiprocessing.cpu_count() > 1:
        # Kick off the four scans concurrently: fwd/rev for each population.
        pool = ThreadPool(min(4, multiprocessing.cpu_count()))
        pending = [pool.apply_async(nsl_scan, args=(h,))
                   for h in (h1, h2, h1[::-1], h2[::-1])]
        pool.close()
        pool.join()
        nsl1_fwd, nsl2_fwd, nsl1_rev, nsl2_rev = [r.get() for r in pending]
        pool.terminate()
    else:
        nsl1_fwd = nsl_scan(h1)
        nsl2_fwd = nsl_scan(h2)
        nsl1_rev = nsl_scan(h1[::-1])
        nsl2_rev = nsl_scan(h2[::-1])
    # Reverse scans come back in reversed variant order; flip them back
    # before combining with the forward scans.
    nsl1 = nsl1_fwd + nsl1_rev[::-1]
    nsl2 = nsl2_fwd + nsl2_rev[::-1]
    score = np.log(nsl1 / nsl2)
    return score
|
Cross-population version of the NSL statistic.
Parameters
----------
h1 : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array for the first population.
h2 : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array for the second population.
use_threads : bool, optional
If True use multiple threads to compute.
Returns
-------
score : ndarray, float, shape (n_variants,)
Unstandardized XPNSL scores.
|
374,009
|
def parse_content(self, content):
    """Parse key=value lines into self.data, dropping empty values.

    NOTE(review): the empty-string literal being compared against was
    stripped during extraction (bare `== ` below).
    """
    data = {}
    data = split_kv_pairs(content, use_partition=False)
    # Suppress properties whose value is empty, mirroring the CMD output.
    self.data = dict((k, v) for k, v in data.items() if not v == )
|
Sample Input::
TimeoutStartUSec=1min 30s
LimitNOFILE=65536
LimitMEMLOCK=
LimitLOCKS=18446744073709551615
Sample Output::
{"LimitNOFILE" : "65536",
"TimeoutStartUSec" : "1min 30s",
"LimitLOCKS" : "18446744073709551615"}
In CMD's output, empty properties are suppressed by default.
We will also suppress empty properties in the returned data.
|
374,010
|
def unsubscribe(self, code_list, subtype_list):
    """Unsubscribe the given stock codes from the given subscription types.

    Returns (RET_OK, None) on success, otherwise (RET_ERROR, message).
    NOTE(review): the kargs dict key literals were stripped during
    extraction.
    """
    ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
    if ret != RET_OK:
        return ret, msg
    query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
                                                     SubscriptionQuery.unpack_unsubscribe_rsp)
    kargs = {
        : code_list,
        : subtype_list,
        "conn_id": self.get_sync_conn_id()
    }
    # Drop the codes from the local subscription bookkeeping first.
    for subtype in subtype_list:
        if subtype not in self._ctx_subscribe:
            continue
        code_set = self._ctx_subscribe[subtype]
        for code in code_list:
            if code not in code_set:
                continue
            code_set.remove(code)
    ret_code, msg, _ = query_processor(**kargs)
    if ret_code != RET_OK:
        return RET_ERROR, msg
    # Also ask the server to stop pushing data for these codes.
    ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id())
    if ret_code != RET_OK:
        return RET_ERROR, msg
    ret_code, msg = self._send_async_req(unpush_req_str)
    if ret_code != RET_OK:
        return RET_ERROR, msg
    return RET_OK, None
|
取消订阅
:param code_list: 取消订阅的股票代码列表
:param subtype_list: 取消订阅的类型,参见SubType
:return: (ret, err_message)
ret == RET_OK err_message为None
ret != RET_OK err_message为错误描述字符串
|
374,011
|
def _connect(self):
    """Lazily connect to our SimpleDB domain, creating it if missing.

    Returns the cached boto SDB domain object. On first call the domain
    name is resolved from boto config (falling back through "sequence_db"
    and "db_name" to "default"), and a 400 response from get_domain is
    treated as "does not exist" and triggers creation.
    """
    if not self._db:
        import boto
        sdb = boto.connect_sdb()
        if not self.domain_name:
            # Fall back through config keys to a sensible default.
            self.domain_name = boto.config.get(
                "DB", "sequence_db",
                boto.config.get("DB", "db_name", "default"))
        try:
            self._db = sdb.get_domain(self.domain_name)
        # Fixed: legacy "except SDBResponseError, e" syntax is a
        # SyntaxError on Python 3; "as e" works on both 2 and 3.
        except SDBResponseError as e:
            if e.status == 400:
                # Domain does not exist yet -- create it.
                self._db = sdb.create_domain(self.domain_name)
            else:
                raise
    return self._db
|
Connect to our domain
|
374,012
|
def object(self, infotype, key):
    """Return the encoding, idletime, or refcount about the key.

    NOTE(review): the command-name literal (first positional argument,
    presumably 'OBJECT') was stripped during extraction.
    """
    return self.execute_command(, infotype, key, infotype=infotype)
|
Return the encoding, idletime, or refcount about the key
|
374,013
|
async def _read_packet(self, packet_type=MysqlPacket):
    """Read one complete MySQL packet from the network.

    Handles multi-part packets (a 0xffffff-byte payload is continued in
    the next frame) and validates the sequence number; returns a
    packet_type instance after its own error check.
    NOTE(review): the initial buffer literal (originally b'') and the
    struct.unpack format string were stripped during extraction.
    """
    buff = b
    while True:
        try:
            packet_header = await self._read_bytes(4)
        except asyncio.CancelledError:
            # Close the connection on cancellation to keep state sane.
            self._close_on_cancel()
            raise
        # Header: 3-byte little-endian payload length + 1-byte sequence id.
        btrl, btrh, packet_number = struct.unpack(
            , packet_header)
        bytes_to_read = btrl + (btrh << 16)
        if packet_number != self._next_seq_id:
            raise InternalError(
                "Packet sequence number wrong - got %d expected %d" %
                (packet_number, self._next_seq_id))
        self._next_seq_id = (self._next_seq_id + 1) % 256
        try:
            recv_data = await self._read_bytes(bytes_to_read)
        except asyncio.CancelledError:
            self._close_on_cancel()
            raise
        buff += recv_data
        if bytes_to_read == 0xffffff:
            # Max-length payload: the packet continues in the next frame.
            continue
        if bytes_to_read < MAX_PACKET_LEN:
            break
    packet = packet_type(buff, self._encoding)
    packet.check_error()
    return packet
|
Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
|
374,014
|
def check_model(self):
    """Check the model for errors; return True when all checks pass.

    Verifies that a factor exists over exactly each clique's scope, that
    cardinality information is available for every variable, and that each
    variable's cardinality is consistent across all factors.
    NOTE(review): the ValueError message literals were stripped during
    extraction.
    """
    for clique in self.nodes():
        # A factor must exist whose scope is exactly this clique.
        factors = filter(lambda x: set(x.scope()) == set(clique), self.factors)
        if not any(factors):
            raise ValueError()
    cardinalities = self.get_cardinality()
    # Every variable appearing in a clique needs a known cardinality.
    if len(set((x for clique in self.nodes() for x in clique))) != len(cardinalities):
        raise ValueError()
    for factor in self.factors:
        for variable, cardinality in zip(factor.scope(), factor.cardinality):
            if (cardinalities[variable] != cardinality):
                raise ValueError(
                    .format(var=variable))
    return True
|
Check the model for various errors. This method checks for the following
errors.
* Checks if factors are defined for all the cliques or not.
* Check for running intersection property is not done explicitly over
here as it done in the add_edges method.
* Checks if cardinality information for all the variables is available or not. If
not it raises an error.
* Check if cardinality of random variable remains same across all the
factors.
Returns
-------
check: boolean
True if all the checks are passed
|
374,015
|
def vatu0(self,E,Lz,u0,R,retv2=False):
    """Calculate the velocity at u0 (NAME: vatu0).

    INPUT: E - energy; Lz - angular momentum; u0 - u0; R - radius
    corresponding to (u0, pi/2); retv2 - if True, return v^2 instead.
    OUTPUT: velocity (or squared velocity).
    HISTORY: 2012-11-29 - Written - Bovy (IAS)
    """
    # Energy balance: v^2 = 2*(E - Phi(u0, pi/2)) - Lz^2/R^2.
    pot = actionAngleStaeckel.potentialStaeckel(u0, numpy.pi / 2.,
                                                self._pot,
                                                self._delta)
    v2 = 2. * (E - pot) - Lz**2. / R**2.
    if retv2:
        return v2
    # Clamp tiny negative values (numerical noise) to zero before sqrt.
    v2[(v2 < 0.) * (v2 > -10.**-7.)] = 0.
    return numpy.sqrt(v2)
|
NAME:
vatu0
PURPOSE:
calculate the velocity at u0
INPUT:
E - energy
Lz - angular momentum
u0 - u0
R - radius corresponding to u0,pi/2.
retv2= (False), if True return v^2
OUTPUT:
velocity
HISTORY:
2012-11-29 - Written - Bovy (IAS)
|
374,016
|
def _get_connection_from_url(self, url, timeout, **kwargs):
    """Return a connection object for the given URL string.

    HTTP(S) URLs produce an HttpConnection; other schemes fall back to the
    Thrift transport, which is only available on Python 2.
    NOTE(review): the scheme literals (presumably 'http'/'https') and the
    package name inside the ImportError message were stripped.
    """
    url = self._decode_url(url, "")
    if url.scheme == or url.scheme == :
        return HttpConnection(url.geturl(), timeout=timeout, **kwargs)
    else:
        if sys.version_info[0] > 2:
            raise ValueError("Thrift transport is not available "
                             "for Python 3")
        try:
            from thrift_connection import ThriftConnection
        except ImportError:
            raise ImportError("The python package "
                              "does not seem to be installed.")
        return ThriftConnection(url.hostname, url.port,
                                timeout=timeout, **kwargs)
|
Returns a connection object given a string url
|
374,017
|
def pause(self, *partitions):
    """Suspend fetching from the requested partitions.

    Future poll() calls will not return records from these partitions
    until they are resumed. Does not affect partition subscription and
    does not trigger a group rebalance.

    Arguments:
        *partitions (TopicPartition): Partitions to pause.
    """
    if any(not isinstance(p, TopicPartition) for p in partitions):
        raise TypeError()
    for partition in partitions:
        log.debug("Pausing partition %s", partition)
        self._subscription.pause(partition)
|
Suspend fetching from the requested partitions.
Future calls to :meth:`~kafka.KafkaConsumer.poll` will not return any
records from these partitions until they have been resumed using
:meth:`~kafka.KafkaConsumer.resume`.
Note: This method does not affect partition subscription. In particular,
it does not cause a group rebalance when automatic assignment is used.
Arguments:
*partitions (TopicPartition): Partitions to pause.
|
374,018
|
def get_heading_encoding(response):
    """Return the normalized document codec name from an HTTP header, or None.

    NOTE(review): the header field name (presumably 'Content-Type') and
    the default literal were stripped from the .get() call below.
    """
    encoding = wpull.protocol.http.util.parse_charset(
        response.fields.get(, ))
    if encoding:
        return wpull.string.normalize_codec_name(encoding)
    else:
        return None
|
Return the document encoding from a HTTP header.
Args:
response (Response): An instance of :class:`.http.Response`.
Returns:
``str``, ``None``: The codec name.
|
374,019
|
def save(self):
    """Save any outstanding config items via the Tor control protocol.

    Returns a Deferred that errbacks if Tor rejects the changes, or
    callbacks with this TorConfig on success (immediately when there is
    no protocol attached).
    NOTE(review): several key literals were stripped during extraction
    (the hidden-services key and the HiddenServiceDir attribute key), and
    the original indentation was lost; nesting below is reconstructed.
    """
    if not self.needs_save():
        # Nothing unsaved; succeed immediately.
        return defer.succeed(self)
    args = []
    directories = []
    for (key, value) in self.unsaved.items():
        if key == :
            self.config[] = value
            # Collect the parent service objects to serialise.
            services = list()
            for hs in value:
                if IOnionClient.providedBy(hs):
                    parent = IOnionClient(hs).parent
                    if parent not in services:
                        services.append(parent)
                elif isinstance(hs, (EphemeralOnionService, EphemeralHiddenService)):
                    raise ValueError(
                        "Only filesystem based Onion services may be added"
                        " via TorConfig.hiddenservices; ephemeral services"
                        " must be created with ."
                    )
                else:
                    if hs not in services:
                        services.append(hs)
            for hs in services:
                for (k, v) in hs.config_attributes():
                    if k == :
                        # Each service directory may only appear once.
                        if v not in directories:
                            directories.append(v)
                            args.append(k)
                            args.append(v)
                        else:
                            raise RuntimeError("Trying to add hidden service with same HiddenServiceDir: %s" % v)
                    else:
                        args.append(k)
                        args.append(v)
            continue
        if isinstance(value, list):
            for x in value:
                # DEFAULT_VALUE entries are not sent to Tor.
                if x is not DEFAULT_VALUE:
                    args.append(key)
                    args.append(str(x))
        else:
            args.append(key)
            args.append(value)
        real_name = self._find_real_name(key)
        # Non-list values go through their registered parser before caching.
        if not isinstance(value, list) and real_name in self.parsers:
            value = self.parsers[real_name].parse(value)
        self.config[real_name] = value
    if self.protocol:
        d = self.protocol.set_conf(*args)
        d.addCallback(self._save_completed)
        return d
    else:
        self._save_completed()
        return defer.succeed(self)
|
Save any outstanding items. This returns a Deferred which will
errback if Tor was unhappy with anything, or callback with
this TorConfig object on success.
|
374,020
|
def get_bulk_asn_whois(addresses=None, retry_count=3, timeout=120):
    """Bulk ASN lookup for multiple IPs via Cymru WHOIS (port 43/tcp).

    Returns the raw newline-separated bulk response as a string. Retries
    on socket errors up to retry_count times, then raises ASNLookupError.
    NOTE(review): several literals were stripped during extraction (the
    ValueError message, the bulk-query template and join separator, the
    initial data string, and the log messages).
    """
    if not isinstance(addresses, list):
        raise ValueError(
        )
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.settimeout(timeout)
        log.debug()
        conn.connect((CYMRU_WHOIS, 43))
        # Send the bulk query (begin/end framing lost in extraction).
        conn.sendall((
            .format(
                .join(addresses))
        ).encode())
        data =
        while True:
            d = conn.recv(4096).decode()
            data += d
            if not d:
                # Remote side closed the stream; response complete.
                break
        conn.close()
        return str(data)
    except (socket.timeout, socket.error) as e:
        log.debug(.format(e))
        if retry_count > 0:
            log.debug(.format(
                str(retry_count)))
            # Recurse with one fewer retry remaining.
            return get_bulk_asn_whois(addresses, retry_count - 1, timeout)
        else:
            raise ASNLookupError()
    except:
        raise ASNLookupError()
|
The function for retrieving ASN information for multiple IP addresses from
Cymru via port 43/tcp (WHOIS).
Args:
addresses (:obj:`list` of :obj:`str`): IP addresses to lookup.
retry_count (:obj:`int`): The number of times to retry in case socket
errors, timeouts, connection resets, etc. are encountered.
Defaults to 3.
timeout (:obj:`int`): The default timeout for socket connections in
seconds. Defaults to 120.
Returns:
str: The raw ASN bulk data, new line separated.
Raises:
ValueError: addresses argument must be a list of IPv4/v6 address
strings.
ASNLookupError: The ASN bulk lookup failed.
|
374,021
|
def forward(self, is_train=False):
    """Perform a forward pass on each training executor.

    :param is_train: forwarded to every executor; True enables training
        mode behaviour (e.g. storing state for the backward pass).
    """
    for executor in self.train_execs:
        executor.forward(is_train=is_train)
|
Perform a forward pass on each executor.
|
374,022
|
def _process_custom_unitary(self, node):
    """Process a custom unitary node, expanding it over its bit arguments.

    Looks the gate up in self.gates, then for each index up to the longest
    bit-argument list pushes an argument/bit binding onto the interpreter
    stacks, emits the corresponding DAG op, and pops the bindings again.
    Raises QiskitError if the gate was never defined.
    """
    name = node.name
    if node.arguments is not None:
        args = self._process_node(node.arguments)
    else:
        args = []
    bits = [self._process_bit_id(node_element)
            for node_element in node.bitlist.children]
    if name in self.gates:
        gargs = self.gates[name]["args"]
        gbits = self.gates[name]["bits"]
        # Broadcast: emit the gate once per index of the longest bit list.
        maxidx = max(map(len, bits))
        for idx in range(maxidx):
            self.arg_stack.append({gargs[j]: args[j]
                                   for j in range(len(gargs))})
            # Per bit argument: use idx for registers (len > 1) and 0 for
            # single bits, so single bits are reused on every iteration.
            element = [idx*x for x in
                       [len(bits[j]) > 1 for j in range(len(bits))]]
            self.bit_stack.append({gbits[j]: bits[j][element[j]]
                                   for j in range(len(gbits))})
            self._create_dag_op(name,
                                [self.arg_stack[-1][s].sym() for s in gargs],
                                [self.bit_stack[-1][s] for s in gbits])
            # Pop the bindings pushed for this iteration.
            self.arg_stack.pop()
            self.bit_stack.pop()
    else:
        raise QiskitError("internal error undefined gate:",
                          "line=%s" % node.line, "file=%s" % node.file)
|
Process a custom unitary node.
|
374,023
|
def on(self, analyte=None, filt=None):
    """Turn on specified filter(s) for specified analyte(s).

    Parameters
    ----------
    analyte : optional, str or array_like
        Name or list of names of analytes. Defaults to all analytes.
    filt : optional. int, str or array_like
        Name/number or iterable names/numbers of filters.

    Returns
    -------
    None
    """
    # Normalise scalar arguments into lists.
    if isinstance(analyte, str):
        analyte = [analyte]
    if isinstance(filt, (int, float)):
        filt = [filt]
    elif isinstance(filt, str):
        filt = self.fuzzmatch(filt, multi=True)
    analytes = self.analytes if analyte is None else analyte
    filts = list(self.index.values()) if filt is None else filt
    for a in analytes:
        for f in filts:
            # Numeric filter identifiers are resolved via the index.
            if isinstance(f, (int, float)):
                f = self.index[int(f)]
            try:
                self.switches[a][f] = True
            except KeyError:
                # Unknown name: fall back to fuzzy matching.
                f = self.fuzzmatch(f, multi=False)
                self.switches[a][f] = True
    return
|
Turn on specified filter(s) for specified analyte(s).
Parameters
----------
analyte : optional, str or array_like
Name or list of names of analytes.
Defaults to all analytes.
filt : optional. int, str or array_like
Name/number or iterable names/numbers of filters.
Returns
-------
None
|
374,024
|
def alpha_beta_aligned(returns,
                       factor_returns,
                       risk_free=0.0,
                       period=DAILY,
                       annualization=None,
                       out=None):
    """Calculate annualized alpha and beta for pre-aligned return series.

    Writes alpha into out[..., 0] and beta into out[..., 1], returning the
    buffer. Inputs must already be label-aligned (pd.Series) or same-shape
    (np.ndarray).
    NOTE(review): the dtype literal (presumably 'float64') was stripped
    from the np.empty call below.
    """
    if out is None:
        out = np.empty(returns.shape[1:] + (2,), dtype=)
    # Compute beta first so alpha can reuse it via the _beta argument.
    b = beta_aligned(returns, factor_returns, risk_free, out=out[..., 1])
    alpha_aligned(
        returns,
        factor_returns,
        risk_free,
        period,
        annualization,
        out=out[..., 0],
        _beta=b,
    )
    return out
|
Calculates annualized alpha and beta.
If they are pd.Series, expects returns and factor_returns have already
been aligned on their labels. If np.ndarray, these arguments should have
the same shape.
Parameters
----------
returns : pd.Series or np.ndarray
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series or np.ndarray
Daily noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
risk_free : int, float, optional
Constant risk-free return throughout the period. For example, the
interest rate on a three month us treasury bill.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
annualization : int, optional
Used to suppress default values available in `period` to convert
returns into annual returns. Value should be the annual frequency of
`returns`.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
Returns
-------
alpha : float
beta : float
|
374,025
|
def _insert_compressed(
        collection_name, docs, check_keys, continue_on_error, opts, ctx):
    """Internal compressed unacknowledged insert message helper.

    Builds the OP_INSERT message, compresses it under opcode 2002 and
    returns (request_id, compressed_message, max_bson_size).
    """
    op_insert, max_bson_size = _insert(
        collection_name, docs, check_keys, continue_on_error, opts)
    request_id, message = _compress(2002, op_insert, ctx)
    return request_id, message, max_bson_size
|
Internal compressed unacknowledged insert message helper.
|
374,026
|
def run_preassembly(stmts_in, **kwargs):
    """Run preassembly on a list of statements and return the results.

    First deduplicates via run_preassembly_duplicate, then resolves
    related statements via run_preassembly_related.
    NOTE(review): every kwargs key literal and the option-dict keys were
    stripped during extraction (empty .get()/[] calls below).
    """
    dump_pkl_unique = kwargs.get()
    belief_scorer = kwargs.get()
    use_hierarchies = kwargs[] if in kwargs else \
        hierarchies
    be = BeliefEngine(scorer=belief_scorer)
    pa = Preassembler(hierarchies, stmts_in)
    # Deduplicate first; unique statements may be pickled via save=.
    run_preassembly_duplicate(pa, be, save=dump_pkl_unique)
    dump_pkl = kwargs.get()
    return_toplevel = kwargs.get(, True)
    poolsize = kwargs.get(, None)
    size_cutoff = kwargs.get(, 100)
    options = {: dump_pkl, : return_toplevel,
               : poolsize, : size_cutoff,
               : kwargs.get(, False),
               :
               kwargs.get(, )
               }
    stmts_out = run_preassembly_related(pa, be, **options)
    return stmts_out
|
Run preassembly on a list of statements.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to preassemble.
return_toplevel : Optional[bool]
If True, only the top-level statements are returned. If False,
all statements are returned irrespective of level of specificity.
Default: True
poolsize : Optional[int]
The number of worker processes to use to parallelize the
comparisons performed by the function. If None (default), no
parallelization is performed. NOTE: Parallelization is only
available on Python 3.4 and above.
size_cutoff : Optional[int]
Groups with size_cutoff or more statements are sent to worker
processes, while smaller groups are compared in the parent process.
Default value is 100. Not relevant when parallelization is not
used.
belief_scorer : Optional[indra.belief.BeliefScorer]
Instance of BeliefScorer class to use in calculating Statement
probabilities. If None is provided (default), then the default
scorer is used.
hierarchies : Optional[dict]
Dict of hierarchy managers to use for preassembly
flatten_evidence : Optional[bool]
If True, evidences are collected and flattened via supports/supported_by
links. Default: False
flatten_evidence_collect_from : Optional[str]
String indicating whether to collect and flatten evidence from the
`supports` attribute of each statement or the `supported_by` attribute.
If not set, defaults to 'supported_by'.
Only relevant when flatten_evidence is True.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
save_unique : Optional[str]
The name of a pickle file to save the unique statements into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of preassembled top-level statements.
|
374,027
|
def add_resource(self, value, name: str = , context_attr: str = None,
                 types: Union[type, Sequence[type]] = ()) -> None:
    """Add a resource to this context and dispatch ``resource_added``.

    :param value: the actual resource value (must not be None)
    :param name: resource name, unique among its registered types
    :param context_attr: attribute name to expose the value under
    :param types: type(s) to register the resource as (defaults to
        the value's own type)
    :raises ResourceConflict: on a duplicate (type, name) pair or an
        already-populated context attribute
    NOTE(review): the default for ``name`` and several message literals
    were stripped during extraction.
    """
    assert check_argument_types()
    self._check_closed()
    if isinstance(types, type):
        types = (types,)
    elif not types:
        # Default to the concrete type of the value.
        types = (type(value),)
    if value is None:
        raise ValueError()
    if not resource_name_re.fullmatch(name):
        raise ValueError(
        )
    # Refuse to shadow an existing attribute on the context.
    if context_attr and getattr_static(self, context_attr, None) is not None:
        raise ResourceConflict(.format(
            context_attr))
    for resource_type in types:
        if (resource_type, name) in self._resources:
            raise ResourceConflict(
                .
                format(qualified_name(resource_type), name))
    resource = ResourceContainer(value, tuple(types), name, context_attr, False)
    # Register the same container under each of its types.
    for type_ in resource.types:
        self._resources[(type_, name)] = resource
    if context_attr:
        setattr(self, context_attr, value)
    self.resource_added.dispatch(types, name, False)
|
Add a resource to this context.
This will cause a ``resource_added`` event to be dispatched.
:param value: the actual resource value
:param name: name of this resource (unique among all its registered types within a single
context)
:param context_attr: name of the context attribute this resource will be accessible as
:param types: type(s) to register the resource as (omit to use the type of ``value``)
:raises asphalt.core.context.ResourceConflict: if the resource conflicts with an existing
one in any way
|
374,028
|
def getopt(self, name, argv, opts):
    """Parse X command-line options into the resource database.

    Consumes leading option arguments from argv using the Option
    instances in opts and returns the remaining non-option arguments.
    Raises OptionError for unknown options or missing option values.
    NOTE(review): the option-prefix literal (presumably '-') and the
    OptionError message templates were stripped during extraction.
    """
    while argv and argv[0] and argv[0][0] == :
        try:
            argv = opts[argv[0]].parse(name, self, argv)
        except KeyError:
            # Unknown option flag.
            raise OptionError( % argv[0])
        except IndexError:
            # Option expected a value that was not supplied.
            raise OptionError( % argv[0])
    return argv
|
getopt(name, argv, opts)
Parse X command line options, inserting the recognised options
into the resource database.
NAME is the application name, and will be prepended to all
specifiers. ARGV is the list of command line arguments,
typically sys.argv[1:].
OPTS is a mapping of options to resource specifiers. The key is
the option flag (with leading -), and the value is an instance of
some Option subclass:
NoArg(specifier, value): set resource to value.
IsArg(specifier): set resource to option itself
SepArg(specifier): value is next argument
ResArg: resource and value in next argument
SkipArg: ignore this option and next argument
SkipLine: ignore rest of arguments
SkipNArgs(count): ignore this option and count arguments
The remaining, non-option arguments are returned.
rdb.OptionError is raised if there is an error in the argument list.
|
374,029
|
def can_use_autofor(self, node):
    """Check whether the given for-node can use the autoFor syntax.

    autoFor requires that the loop target is a plain name local to the
    loop's scope, not involved in any OpenMP dependence, and that the
    node carries no explicit OpenMP pragma.
    """
    auto_for = (isinstance(node.target, ast.Name) and
                node.target.id in self.scope[node] and
                node.target.id not in self.openmp_deps)
    # Fixed: the original additionally re-evaluated
    # `node.target.id not in self.openmp_deps` unconditionally after this
    # point; besides being redundant with the short-circuited test above,
    # it dereferenced `.id` even when the target is not an ast.Name
    # (e.g. a tuple target), raising AttributeError.
    auto_for &= not metadata.get(node, OMPDirective)
    return auto_for
|
Check if given for Node can use autoFor syntax.
To use auto_for:
- iterator should have local scope
- yield should not be used
- OpenMP pragma should not be used
TODO : Yield should block only if it is used in the for loop, not in the
whole function.
|
374,030
|
def simxGetFloatingParameter(clientID, paramIdentifier, operationMode):
    """Fetch a floating-point parameter from the remote API.

    Please have a look at the function description/documentation in the
    V-REP user manual. Returns (returnCode, value).
    """
    # Allocate a C float used as the out-parameter of the native call.
    out_value = ct.c_float()
    ret = c_GetFloatingParameter(clientID, paramIdentifier,
                                 ct.byref(out_value), operationMode)
    return ret, out_value.value
|
Please have a look at the function description/documentation in the V-REP user manual
|
374,031
|
def add_connection(self, host):
    """Create a new connection for *host* and add it to the pool.

    :arg host: kwargs that will be used to create the connection instance
    """
    hosts = self.hosts
    hosts.append(host)
    # Rebuild the connection pool from the updated host list.
    self.set_connections(hosts)
|
Create a new :class:`~elasticsearch.Connection` instance and add it to the pool.
:arg host: kwargs that will be used to create the instance
|
374,032
|
def nr_of_antenna(nbl, auto_correlations=False):
    """Compute the number of antenna for the given number of baselines.

    Inverts nbl = na*(na-1)/2 (or na*(na+1)/2 when auto-correlations are
    included) via the quadratic formula.

    :param nbl: number of baselines
    :param auto_correlations: whether nbl includes auto-correlations
    :returns: number of antenna as an int
    """
    t = 1 if auto_correlations is False else -1
    # Fixed: use math.isqrt so the discriminant is computed exactly; for
    # very large baseline counts float sqrt could round the perfect square
    # up or down and shift the result by one.
    return (t + math.isqrt(1 + 8*nbl)) // 2
|
Compute the number of antenna for the
given number of baselines. Can specify whether
auto-correlations should be taken into
account
|
374,033
|
def f(self, m):
    """The recursively composed version of filter function f.

    Returns self._f(m) for a leaf matcher; with a single child, returns
    the logical conjunction of self._f(m) and the child's f(m).

    Raises:
        Exception: if this matcher has more than one child.
    """
    if len(self.children) == 0:
        return self._f(m)
    elif len(self.children) == 1:
        return self._f(m) and self.children[0].f(m)
    else:
        # Fixed: instances generally have no __name__ attribute, so the
        # original f"{self.__name__}" raised AttributeError instead of
        # the intended message; report the class name instead.
        raise Exception(
            f"{type(self).__name__} does not support more than one child Matcher"
        )
|
The recursively composed version of filter function f.
By default, returns logical **conjunction** of operator and single
child operator
|
374,034
|
def forward_events_to(self, sink, include_source=False):
    """Forward this object's signals to *sink*.

    :param sink: an Eventful instance that will receive forwarded events
    :param include_source: whether the source is passed along with events
    NOTE(review): the assert message (originally an f-string) was stripped
    to the bare name ``f``; it is only evaluated when the assert fails.
    """
    assert isinstance(sink, Eventful), f
    self._forwards[sink] = include_source
|
This forwards signal to sink
|
374,035
|
def download_csv(data, filename):
    """Download an H2O data set from the server to a local CSV file.

    Warning: files located on the H2O server may be very large; ensure
    enough local disk space for the entire file.

    :param data: an H2OFrame object to be downloaded.
    :param filename: name for the CSV file the data is saved to.
    NOTE(review): the urlopen() call below lost its argument(s) during
    extraction (it originally obtained a urlopen callable).
    """
    assert_is_type(data, H2OFrame)
    assert_is_type(filename, str)
    url = h2oconn.make_url("DownloadDataset", 3) + "?frame_id={}&hex_string=false".format(data.frame_id)
    # Stream the dataset bytes straight into the target file.
    with open(filename, "wb") as f:
        f.write(urlopen()(url).read())
|
Download an H2O data set to a CSV file on the local disk.
Warning: Files located on the H2O server may be very large! Make sure you have enough
hard drive space to accommodate the entire file.
:param data: an H2OFrame object to be downloaded.
:param filename: name for the CSV file where the data should be saved to.
|
374,036
|
def md5(text):
    """Return the hex md5 digest of *text* (coerced to unicode, utf-8 encoded)."""
    digest = hashlib.md5()
    digest.update(_unicode(text).encode("utf-8"))
    return digest.hexdigest()
|
Returns the md5 hash of a string.
|
374,037
|
def delete_chat_photo(chat_id, **kwargs):
    """Delete a chat photo via the Telegram bot API; returns True on success.

    Photos cannot be changed for private chats; the bot must be a chat
    administrator with the appropriate rights.

    :param chat_id: unique identifier for the target chat or @channelusername
    :param kwargs: passed down to TelegramBotRPCRequest
    NOTE(review): the API method-name literal (presumably
    'deleteChatPhoto') was stripped from the request constructor.
    """
    params = dict(chat_id=chat_id)
    return TelegramBotRPCRequest(, params=params, on_result=lambda result: result, **kwargs)
|
Use this method to delete a chat photo. Photos can't be changed for private chats. The bot must be an administrator in the chat
for this to work and must have the appropriate admin rights.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:return: Returns True on success.
:rtype: bool
|
374,038
|
def crsConvert(crsIn, crsOut):
    """Convert between different types of spatial references.

    :param crsIn: int (EPSG code), str, or osr.SpatialReference input CRS
    :param crsOut: output selector; per the docstring one of 'wkt',
        'prettyWkt', 'proj4', 'epsg', 'opengis' or 'osr'
    NOTE(review): the selector string literals and format templates were
    stripped during extraction (empty comparisons below), and the original
    indentation was lost; nesting is reconstructed.
    """
    if isinstance(crsIn, osr.SpatialReference):
        # Clone so the caller's object is never mutated.
        srs = crsIn.Clone()
    else:
        srs = osr.SpatialReference()
        if isinstance(crsIn, int):
            # Integers are treated as EPSG codes (template lost).
            crsIn = .format(crsIn)
        if isinstance(crsIn, str):
            try:
                srs.SetFromUserInput(crsIn)
            except RuntimeError:
                raise TypeError()
        else:
            raise TypeError()
    if crsOut == :
        return srs.ExportToWkt()
    elif crsOut == :
        return srs.ExportToPrettyWkt()
    elif crsOut == :
        return srs.ExportToProj4()
    elif crsOut == :
        srs.AutoIdentifyEPSG()
        return int(srs.GetAuthorityCode(None))
    elif crsOut == :
        srs.AutoIdentifyEPSG()
        return .format(srs.GetAuthorityCode(None))
    elif crsOut == :
        return srs
    else:
        raise ValueError()
|
convert between different types of spatial references
Parameters
----------
crsIn: int, str or :osgeo:class:`osr.SpatialReference`
the input CRS
crsOut: {'wkt', 'proj4', 'epsg', 'osr', 'opengis' or 'prettyWkt'}
the output CRS type
Returns
-------
int, str or :osgeo:class:`osr.SpatialReference`
the output CRS
Examples
--------
convert an integer EPSG code to PROJ4:
>>> crsConvert(4326, 'proj4')
'+proj=longlat +datum=WGS84 +no_defs '
convert a PROJ4 string to an opengis URL:
>>> crsConvert('+proj=longlat +datum=WGS84 +no_defs ', 'opengis')
'http://www.opengis.net/def/crs/EPSG/0/4326'
convert the opengis URL back to EPSG:
>>> crsConvert('http://www.opengis.net/def/crs/EPSG/0/4326', 'epsg')
4326
convert an EPSG compound CRS (WGS84 horizontal + EGM96 vertical)
>>> crsConvert('EPSG:4326+5773', 'proj4')
'+proj=longlat +datum=WGS84 +geoidgrids=egm96_15.gtx +vunits=m +no_defs '
|
374,039
|
def acquire(self, tag, blocking=True):
    """Acquire the semaphore.

    :param tag: tag identifying what is acquiring the semaphore; only used
        for logging/error messages (kept for API compatibility with the
        SlidingWindowSemaphore implementation).
    :param blocking: if True, block until acquired; if False, raise
        NoResourcesAvailable when the semaphore cannot be acquired.
    :returns: None (token placeholder for API compatibility).
    :raises NoResourcesAvailable: when non-blocking and unavailable.
    """
    logger.debug("Acquiring %s", tag)
    if not self._semaphore.acquire(blocking):
        # Fixed: the message had no %s placeholder, so "..." % tag raised
        # TypeError instead of producing the intended error message.
        raise NoResourcesAvailable("Cannot acquire tag %s" % tag)
|
Acquire the semaphore
:param tag: A tag identifying what is acquiring the semaphore. Note
that this is not really needed to directly use this class but is
needed for API compatibility with the SlidingWindowSemaphore
implementation.
:param block: If True, block until it can be acquired. If False,
do not block and raise an exception if it cannot be acquired.
:returns: A token (can be None) to use when releasing the semaphore
|
374,040
|
def with_user_roles(roles):
    """Decorator factory restricting a view to users with one of *roles*.

    Requires Flask-Login: ``current_user`` must expose ``is_authenticated()``
    and a ``role`` attribute. Unauthenticated users receive a 401; users
    whose role is not in *roles* receive a 403; a user object without a
    ``role`` attribute raises AttributeError.

    Usage::

        @app.route('/user')
        @login_required
        @with_user_roles(['admin', 'user'])
        def user_page():
            return "You've got permission to access this page."
    """
    def wrapper(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            if current_user.is_authenticated():
                if not hasattr(current_user, "role"):
                    # Fixed: the original message literal was corrupted in
                    # the source; rewritten to state the actual problem.
                    raise AttributeError(
                        "current_user is missing the 'role' attribute")
                if current_user.role not in roles:
                    return abort(403)
            else:
                return abort(401)
            return f(*args, **kwargs)
        return wrapped
    return wrapper
|
with_user_roles(roles)
It allows to check if a user has access to a view by adding the decorator
with_user_roles([])
Requires flask-login
In your model, you must have a property 'role', which will be invoked to
be compared to the roles provided.
If current_user doesn't have a role, it will throw a 403
If the current_user is not logged in will throw a 401
* Require Flask-Login
---
Usage
@app.route('/user')
@login_require
@with_user_roles(['admin', 'user'])
def user_page(self):
return "You've got permission to access this page."
|
374,041
|
def read(fname,**kw):
    # NOTE(review): this block is heavily corrupted. Part of the docstring
    # leaked into the source unquoted, and most string literals were fused
    # into the single garbled line below. Only the try/except tail is
    # recognisable: it dispatches on a dump type and raises
    # NotImplementedError for unsupported file types. Do not run as-is;
    # recover the original from upstream before editing.
    Ext remove the edges from domains before
    concatenation and dons, that is, the grid information. Usually redundant
    with x,y,z returned.
    return_array -- If set to truthy, then try to return a numpy array with a dtype.
    Requires of course that the quantities have the same shape.
    gzipgzipguessgzip\.gz$gziprboverrideoverridedump_typevardump_typevprintvprintdump_typevarquantitiesvarkeep_edgesfirst_sortsortsortkeep_xsreturn_arraydump_type']]();
    except KeyError:
        raise NotImplementedError("Other file types not implemented yet!");
    return d;
|
Reads an lsp output file and returns a raw dump of data,
sectioned into quantities either as an dictionary or a typed numpy array.
Parameters:
-----------
fname -- filename of thing to read
Keyword Arguments:
------------------
vprint -- Verbose printer. Used in scripts
override -- (type, start) => A tuple of a dump type and a place to start
in the passed file, useful to attempting to read semicorrupted
files.
gzip -- Read as a gzip file.
flds/sclr Specific Arguments:
-----------------------------
var -- list of quantities to be read. For fields, this can consist
of strings that include vector components, e.g., 'Ex'. If
None (default), read all quantities.
keep_edges -- If set to truthy, then don't remove the edges from domains before
concatenation and don't reshape the flds data.
sort -- If not None, sort using these indices, useful for avoiding
resorting. If True and not an ndarray, just sort.
first_sort -- If truthy, sort, and return the sort data for future flds
that should have the same shape.
keep_xs -- Keep the xs's, that is, the grid information. Usually redundant
with x,y,z returned.
return_array -- If set to truthy, then try to return a numpy array with a dtype.
Requires of course that the quantities have the same shape.
|
374,042
|
def options(self, **options):
self._contexts.append(self._contexts[-1].copy())
self.set_options(**options)
try:
yield
finally:
self._contexts.pop(-1)
|
A context-manager for setting connection options; the original
values of the options will be restored when the context-manager exits.
For example::
with c.options(gui_mode = False):
c.cmd.vol_list()
|
374,043
|
def state(name):
try:
cmd = .format(name)
return _machinectl(cmd, ignore_retcode=True)[].split()[-1]
except IndexError:
return
|
Return state of container (running or stopped)
CLI Example:
.. code-block:: bash
salt myminion nspawn.state <name>
|
374,044
|
def _remove_extraneous_xml_declarations(xml_str):
xml_declaration =
if xml_str.startswith():
xml_declaration, xml_str = xml_str.split(, maxsplit=1)
xml_declaration +=
xml_str = re.sub(r, , xml_str, flags=re.I)
return xml_declaration + xml_str
|
Sometimes devices return XML with more than one XML declaration in, such as when returning
their own XML config files. This removes the extra ones and preserves the first one.
|
374,045
|
def SetSize(self, rect):
"Called to position/size the edit control within the cell rectangle."
self._tc.SetDimensions(rect.x, rect.y, rect.width+2, rect.height+2,
wx.SIZE_ALLOW_MINUS_ONE)
|
Called to position/size the edit control within the cell rectangle.
|
374,046
|
def handle_exception(self, exc):
if isinstance(
exc, (rest_exceptions.NotAuthenticated,
rest_exceptions.AuthenticationFailed)) and self.HANDLE_UNAUTHENTICATED:
return HttpResponseRedirect(.format(
reverse(),
self.request.get_full_path()))
if isinstance(exc, Http404):
raise Http404()
if isinstance(exc, rest_exceptions.PermissionDenied):
raise django_exceptions.PermissionDenied()
return super().handle_exception(exc)
|
Use custom exception handler for errors.
|
374,047
|
def token(config, token):
if not token:
info_out(
"To generate a personal API token, go to:\n\n\t"
"https://github.com/settings/tokens\n\n"
"To read more about it, go to:\n\n\t"
"https://help.github.com/articles/creating-an-access"
"-token-for-command-line-use/\n\n"
)
token = getpass.getpass("GitHub API Token: ").strip()
url = urllib.parse.urljoin(config.github_url, "/user")
assert url.startswith("https://"), url
response = requests.get(url, headers={"Authorization": "token {}".format(token)})
if response.status_code == 200:
update(
config.configfile,
{
"GITHUB": {
"github_url": config.github_url,
"token": token,
"login": response.json()["login"],
}
},
)
name = response.json()["name"] or response.json()["login"]
success_out("Hi! {}".format(name))
else:
error_out("Failed - {} ({})".format(response.status_code, response.content))
|
Store and fetch a GitHub access token
|
374,048
|
def install_docs(instance, clear_target):
_check_root()
def make_docs():
log("Generating HTML documentation")
try:
build = Popen(
[
,
],
cwd=
)
build.wait()
except Exception as e:
log("Problem during documentation building: ", e, type(e),
exc=True, lvl=error)
return False
return True
make_docs()
log("Updating documentation directory:", target)
if not os.path.exists(os.path.join(os.path.curdir, source)):
log(
"Documentation not existing yet. Run python setup.py "
"build_sphinx first.", lvl=error)
return
if os.path.exists(target):
log("Path already exists: " + target)
if clear_target:
log("Cleaning up " + target, lvl=warn)
shutil.rmtree(target)
log("Copying docs to " + target)
copy_tree(source, target)
log("Done: Install Docs")
|
Builds and installs the complete HFOS documentation.
|
374,049
|
def payload_unregister(klass, pid):
cmd = []
for x in [klass, pid]:
cmd.append(x)
subprocess.check_call(cmd)
|
is used while a hook is running to let Juju know
that a payload has been manually stopped. The <class> and <id> provided
must match a payload that has been previously registered with juju using
payload-register.
|
374,050
|
def _norm_default(x):
import scipy.linalg
if _blas_is_applicable(x.data):
nrm2 = scipy.linalg.blas.get_blas_funcs(, dtype=x.dtype)
norm = partial(nrm2, n=native(x.size))
else:
norm = np.linalg.norm
return norm(x.data.ravel())
|
Default Euclidean norm implementation.
|
374,051
|
def get_subfolders(self):
headers = self.headers
endpoint = + self.id +
r = requests.get(endpoint, headers=headers)
if check_response(r):
return self._json_to_folders(self.account, r.json())
|
Retrieve all child Folders inside of this Folder.
Raises:
AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
Returns:
List[:class:`Folder <pyOutlook.core.folder.Folder>`]
|
374,052
|
def _on_stackexchange_user(self, future, access_token, response):
response[] = access_token
future.set_result(response)
|
Invoked as a callback when self.stackexchange_request returns the
response to the request for user data.
:param method future: The callback method to pass along
:param str access_token: The access token for the user's use
:param dict response: The HTTP response already decoded
|
374,053
|
def cols_strip(df, col_list, dest=False):
    """Apply ``str.strip()`` to several columns of a DataFrame.

    Parameters:
    df - DataFrame
        DataFrame to operate on
    col_list - list of strings
        names of the columns to strip
    dest - bool, default False
        Whether to apply the result to the DataFrame or return it.
        True is apply, False is return.
    """
    if dest:
        # In-place mode: strip each column directly on df.
        for name in col_list:
            col_strip(df, name, dest)
        return None
    # Return mode: build a new frame of the stripped columns.
    return _pd.DataFrame({name: col_strip(df, name) for name in col_list})
|
Performs str.strip() a column of a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_list - list of strings
names of columns to strip
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
374,054
|
def _yum_pkginfo(output):
cur = {}
keys = itertools.cycle((, , ))
values = salt.utils.itertools.split(_strip_headers(output))
osarch = __grains__[]
for (key, value) in zip(keys, values):
if key == :
try:
cur[], cur[] = value.rsplit(, 1)
except ValueError:
cur[] = value
cur[] = osarch
cur[] = salt.utils.pkg.rpm.resolve_name(cur[],
cur[],
osarch)
else:
if key == :
value = value.rstrip()
elif key == :
value = value.lstrip()
cur[key] = value
if key == :
pkginfo = salt.utils.pkg.rpm.pkginfo(**cur)
cur = {}
if pkginfo is not None:
yield pkginfo
|
Parse yum/dnf output (which could contain irregular line breaks if package
names are long) retrieving the name, version, etc., and return a list of
pkginfo namedtuples.
|
374,055
|
def print_log(value_color="", value_noncolor=""):
    """Print *value_color* wrapped in terminal color codes, followed by the
    uncolored text.

    NOTE(review): the HEADER/ENDC string literals were lost in extraction;
    they presumably held ANSI escape sequences (color start / reset) --
    confirm against the original source.
    """
    HEADER =
    ENDC =
    print(HEADER + value_color + ENDC + str(value_noncolor))
|
set the colors for text.
|
374,056
|
def get_group_filters(self):
    """Return es OR filters covering all special coverage group conditions.

    Every group in every group set contributes one filter that AND-combines
    the filters derived from its conditions; a group with no conditions
    contributes a plain MatchAll().
    """
    # Query-language field names mapped to elasticsearch document paths.
    field_map = {
        "feature-type": "feature_type.slug",
        "tag": "tags.slug",
        "content-type": "_type"
    }
    filters = []
    for group_set in self.query.get("groups", []):
        for group in group_set:
            combined = es_filter.MatchAll()
            for condition in group.get("conditions", []):
                combined &= get_condition_filter(condition, field_map=field_map)
            filters.append(combined)
    return filters
|
Return es OR filters to include all special coverage group conditions.
|
374,057
|
def axes(self):
    """Return all the axes that have been defined for this chart.

    The generic axes come first, followed by the horizontal and then the
    vertical axis when they are set.

    :return [<projexui.widgets.xchart.XChartAxis>, ..]
    """
    extras = (self._horizontalAxis, self._verticalAxis)
    return self._axes[:] + [axis for axis in extras if axis]
|
Returns all the axes that have been defined for this chart.
:return [<projexui.widgets.xchart.XChartAxis>, ..]
|
374,058
|
def _get_placeholders(sql_statement, parameters):
placeholders = {}
try:
for match in REGEX_PATTERN_SQL_PLACEHOLDERS.findall(sql_statement):
for (i, placeholder_type) in enumerate(PlaceholderType._values):
placeholder_name = match[i]
if placeholder_name:
placeholder_value = parameters[placeholder_name]
if placeholder_type == PlaceholderType.nested_list \
and (isinstance(placeholder_value, tuple) and len(placeholder_value) == 1) \
and not isinstance(placeholder_value, (list, set, tuple)):
raise ValueError( % placeholder_name)
placeholders[placeholder_name] = (placeholder_type, placeholder_value)
break
except KeyError:
raise ValueError( % placeholder_name)
undefined_placeholders = [ parameter for parameter in parameters.iterkeys()
if parameter not in placeholders ]
if undefined_placeholders:
raise ValueError( \
% (.join([ % _ for _ in undefined_placeholders ]), sql_statement))
return placeholders
|
Retrieve the list of placeholders and their type defined in an SQL
statement.
@param sql_statement: a parameterized statement.
@param parameters: the list of parameters used in the SQL statement.
@return: a dictionary of placeholders where the key represents the
name of a placeholder, the value corresponds to a tuple::
(``type:PlaceholderType``, ``value``)
where :
* ``type``: type of the placeholder
* ``value``: value to replace the placeholder.
|
374,059
|
def isIsosceles(self):
    """True iff at least two side lengths are equal, boolean."""
    a, b, c = self.a, self.b, self.c
    return a == b or a == c or b == c
|
True iff two side lengths are equal, boolean.
|
374,060
|
def match1(pattern, data, **parse_kwargs):
    """Return the first value matched by *pattern* in *data*, or None if
    there are no matches."""
    found = match(pattern, data, **parse_kwargs)
    if not found:
        return None
    return found[0]
|
Returns first matched value of pattern in data or None if no matches
|
374,061
|
def _sanity_check_construct_result_block(ir_blocks):
if not isinstance(ir_blocks[-1], ConstructResult):
raise AssertionError(u.format(ir_blocks))
for block in ir_blocks[:-1]:
if isinstance(block, ConstructResult):
raise AssertionError(u
u.format(ir_blocks))
|
Assert that ConstructResult is always the last block, and only the last block.
|
374,062
|
def enumerate_chunks (phrase, spacy_nlp):
if (len(phrase) > 1):
found = False
text = " ".join([rl.text for rl in phrase])
doc = spacy_nlp(text.strip(), parse=True)
for np in doc.noun_chunks:
if np.text != text:
found = True
yield np.text, find_chunk(phrase, np.text.split(" "))
if not found and all([rl.pos[0] != "v" for rl in phrase]):
yield text, phrase
|
iterate through the noun phrases
|
374,063
|
def update(self):
data = self._get_data()
msg = []
for entry in data:
link = self._get_entry_link(entry)
stored_entry, is_new = Post.objects.get_or_create(link=link)
self._store_post(stored_entry, entry)
if is_new is True:
msg.append( % stored_entry.link)
else:
msg.append( % stored_entry.link)
self.updated = utils.get_datetime_now()
self.save(no_signals=True)
return .join(msg)
|
This method should be called to update associated Posts
It will call content-specific methods:
_get_data() to obtain list of entries
_store_post() to store obtained entry object
_get_data_source_url() to get an URL to identify Posts from this Data Source
|
374,064
|
def canonicalize_path(cwd, path):
    """Canonicalize a path relative to a given working directory.

    The path, if not absolute, is interpreted relative to the working
    directory, then converted to absolute form.

    :param cwd: The working directory.
    :param path: The path to canonicalize.
    :returns: The absolute path.
    """
    if os.path.isabs(path):
        return os.path.abspath(path)
    return os.path.abspath(os.path.join(cwd, path))
|
Canonicalizes a path relative to a given working directory. That
is, the path, if not absolute, is interpreted relative to the
working directory, then converted to absolute form.
:param cwd: The working directory.
:param path: The path to canonicalize.
:returns: The absolute path.
|
374,065
|
def to_repr(value, ctx):
as_string = to_string(value, ctx)
if isinstance(value, str) or isinstance(value, datetime.date) or isinstance(value, datetime.time):
as_string = as_string.replace(, )
as_string = % as_string
return as_string
|
Converts a value back to its representation form, e.g. x -> "x"
|
374,066
|
def simplex_summation_matrix(simplices, weight=None, inverse=False):
    """Build a sparse matrix that sums simplex values onto vertices.

    simplex_summation_matrix(mtx) yields a scipy sparse matrix that, when
    dotted with a column vector of length m (the number of simplices in
    mtx), yields a vector of length n (the number of vertices); the result
    is, for each vertex, the sum over the simplices to which it belongs.

    The matrix mtx must be oriented such that the first dimension (rows)
    corresponds to the vertices of the simplices and the second dimension
    (columns) corresponds to the simplices themselves.

    Parameters
    ----------
    simplices : (d, m) array-like of int
        Vertex indices; row i holds the i'th vertex of each of the m
        simplices.
    weight : (m,) array-like of float, optional
        Per-simplex weight; when given, the summation is a weighted sum
        instead of a flat sum.
    inverse : bool, optional
        When True, return the inverse summation matrix (summation of the
        vertices onto the simplices) instead.

    Returns
    -------
    scipy sparse matrix of shape (n, m), or (m, n) when inverse=True.
    """
    simplices = np.asarray(simplices)
    n = np.max(simplices) + 1
    (d, m) = simplices.shape
    rng = range(m)
    if inverse:
        if weight is None:
            f = sps.csr_matrix
        else:
            nrng = range(n)
            # Diagonal per-vertex weight matrix, applied after construction.
            # Fix: np.float was removed in NumPy 1.24; use the builtin float.
            ww = sps.csr_matrix((weight, (nrng, nrng)), shape=(n, n),
                                dtype=float)
            # NOTE(review): ww is (n, n) but the constructed matrix is
            # (m, n); ww.dot(...) only conforms when m == n -- verify the
            # weighted-inverse path against the original intent.
            f = lambda *args, **kwargs: ww.dot(sps.csc_matrix(*args, **kwargs))
        s = f((np.ones(d * m, dtype=int),
               (np.concatenate([rng for _ in range(d)]),
                np.concatenate(simplices))),
              shape=(m, n),
              dtype=int)
    else:
        # One entry per (vertex, simplex) incidence.
        # Fix: np.int was removed in NumPy 1.24; use the builtin int.
        s = sps.csr_matrix(
            (np.ones(d * m, dtype=int),
             (np.concatenate(simplices),
              np.concatenate([rng for _ in range(d)]))),
            shape=(n, m),
            dtype=int)
        if weight is not None:
            # Post-multiply by a diagonal per-simplex weight matrix.
            s = s.dot(sps.csc_matrix((weight, (rng, rng)), shape=(m, m),
                                     dtype=float))
    return s
|
simplex_summation_matrix(mtx) yields a scipy sparse array matrix that, when dotted with a
column vector of length m (where m is the number of simplices described in the simplex matrix,
mtx), yields a vector of length n (where n is the number of vertices in the simplex mesh); the
returned vetor is the sum over each vertex, of the faces to which it belongs.
The matrix mtx must be oriented such that the first dimension (rows) corresponds to the vertices
of the simplices and the second dimension (columns) corresponds to simplices themselves.
The optional argument weight may specify a weight for each face, in which case the summation is
a weighted sum instead of a flat sum.
The optional argument inverse=True may be given to indicate that the inverse summation matrix
(summation of the vertices onto the simplices) should be returned.
|
374,067
|
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
iir_d.sos_zplane(self.sos,auto_scale,size,tol)
|
Plot the poles and zeros of the FIR filter in the z-plane
|
374,068
|
def set_server_key(self, zmq_socket, server_secret_key_path):
load_and_set_key(zmq_socket, server_secret_key_path)
zmq_socket.curve_server = True
|
must call before bind
|
374,069
|
def reset_options(self, empty=True):
    """Reset ALL options.

    :param bool empty: When :data:`True`, completely removes all options;
        when :data:`False`, sets them back to their original ``default``
        value. This function skips the ``locked`` control.
    """
    if not empty:
        # Restore every option's value from its recorded default.
        self.gc["value"] = self.gc["default"]
    else:
        # Drop everything, keeping only the configured column layout.
        self.gc = pd.DataFrame(columns=self.clmn)
|
Empty ALL options.
:param bool empty: When :data:`True`, completely removes all options;
when :data:`False`, sets them back to its original value.
This function skips ``locked`` control.
|
374,070
|
def get_potential_markables(docgraph):
potential_markables = []
for node_id, nattr in dg.select_nodes_by_layer(docgraph, , data=True):
if nattr[] == :
pp_parent = False
for source, target in docgraph.in_edges(node_id):
parent_node = docgraph.node[source]
if in parent_node and parent_node[] == :
potential_markables.append(source)
pp_parent = True
if not pp_parent:
potential_markables.append(node_id)
elif nattr[] == :
potential_markables.append(node_id)
return potential_markables
|
returns a list of all NPs and PPs in the given docgraph.
Parameters
----------
docgraph : DiscourseDocumentGraph
a document graph that (at least) contains syntax trees
(imported from Tiger XML files)
Returns
-------
potential_markables : list of str or int
Node IDs of all nodes that represent an NP/PP syntactical category/phrase
in the input document. If an NP is embedded in a PP, only the node
ID of the PP is returned.
|
374,071
|
def checkIfRemoteIsNewer(self, localfile, remote_size, remote_modify):
is_remote_newer = False
status = os.stat(localfile)
LOG.info(
"\nLocal file size: %i"
"\nLocal Timestamp: %s",
status[ST_SIZE], datetime.fromtimestamp(status.st_mtime))
remote_dt = Bgee._convert_ftp_time_to_iso(remote_modify)
if remote_dt != datetime.fromtimestamp(status.st_mtime) or \
status[ST_SIZE] != int(remote_size):
is_remote_newer = True
LOG.info(
"Object on server is has different size %i and/or date %s",
remote_size, remote_dt)
return is_remote_newer
|
Overrides checkIfRemoteIsNewer in Source class
:param localfile: str file path
:param remote_size: str bytes
:param remote_modify: str last modify date in the form 20160705042714
:return: boolean True if remote file is newer else False
|
374,072
|
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
raise Exception("BROKEN; depends on list_mutable_data")
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
if proxy is None:
proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
key_info = []
key_prefix = "gpg.%s." % appname
immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
if in immutable_listing:
raise Exception("Blockstack error: %s" % immutable_listing[])
for immutable in immutable_listing[]:
name = immutable[]
data_hash = immutable[]
if name.startswith( key_prefix ):
key_info.append( {
: name[len(key_prefix):],
: make_immutable_data_url( blockchain_id, name, data_hash )
})
mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
if in mutable_listing:
raise Exception("Blockstack error: %s" % mutable_listing[])
for mutable in mutable_listing[]:
name = mutable[]
version = mutable[]
if name.startswith( key_prefix ):
key_info.append( {
: name[len(key_prefix):],
: make_mutable_data_url( blockchain_id, name, version )
})
return key_info
|
List the set of available GPG keys tagged for a given application.
Return list of {'keyName': key name, 'contentUrl': URL to key data}
Raise on error
|
374,073
|
def encode_offset_commit_request(cls, client_id, correlation_id,
group, group_generation_id, consumer_id,
payloads):
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(
client_id, correlation_id, KafkaCodec.OFFSET_COMMIT_KEY,
api_version=1,
)
message += write_short_ascii(group)
message += struct.pack(, group_generation_id)
message += write_short_ascii(consumer_id)
message += struct.pack(, len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_ascii(topic)
message += struct.pack(, len(topic_payloads))
for partition, payload in topic_payloads.items():
message += struct.pack(, partition, payload.offset,
payload.timestamp)
message += write_short_bytes(payload.metadata)
return message
|
Encode some OffsetCommitRequest structs (v1)
:param bytes client_id: string
:param int correlation_id: int
:param str group: the consumer group to which you are committing offsets
:param int group_generation_id: int32, generation ID of the group
:param str consumer_id: string, Identifier for the consumer
:param list payloads: list of :class:`OffsetCommitRequest`
|
374,074
|
def from_xml_node(xml_node):
def gather_enum_values():
l = []
for element in xml_node.iterfind():
l.append(element.text)
return l
name = xml_node.findtext("name")
type = xml_node.tag
if type in ("label", "description"): return None
default = xml_node.findtext("default")
longflag = xml_node.findtext()
if default:
default = default.replace(, ).replace("indexlabeldescriptionchannelfileExtensions', None)
return Parameter(name, type, default, doc, channel, values=values, index=index, label=label,
longflag=longflag, file_ext=file_ext)
|
constructs a CLI.Parameter from an xml node.
:param xml_node:
:type xml_node: xml.etree.ElementTree.Element
:rtype: Executable.Parameter
:return:
|
374,075
|
def add_filter(self, filter_, frequencies=None, dB=True,
analog=False, sample_rate=None, **kwargs):
if not analog:
if not sample_rate:
raise ValueError("Must give sample_rate frequency to display "
"digital (analog=False) filter")
sample_rate = Quantity(sample_rate, ).value
dt = 2 * pi / sample_rate
if not isinstance(frequencies, (type(None), int)):
frequencies = numpy.atleast_1d(frequencies).copy()
frequencies *= dt
_, fcomp = parse_filter(filter_, analog=False)
if analog:
lti = signal.lti(*fcomp)
else:
lti = signal.dlti(*fcomp, dt=dt)
w, mag, phase = lti.bode(w=frequencies)
if not dB:
mag = 10 ** (mag / 10.)
mline = self.maxes.plot(w, mag, **kwargs)[0]
pline = self.paxes.plot(w, phase, **kwargs)[0]
return mline, pline
|
Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter.
|
374,076
|
def info(name, location=):
r\\minion-id
if name not in list_tasks(location):
return .format(name, location)
with salt.utils.winapi.Com():
task_service = win32com.client.Dispatch("Schedule.Service")
task_service.Connect()
task_folder = task_service.GetFolder(location)
task = task_folder.GetTask(name)
properties = {: task.Enabled,
: _get_date_value(task.LastRunTime),
: show_win32api_code(task.LastTaskResult),
: task.NumberOfMissedRuns,
: _get_date_value(task.NextRunTime),
: states[task.State]}
def_set = task.Definition.Settings
settings = {}
settings[] = def_set.AllowDemandStart
settings[] = def_set.AllowHardTerminate
if def_set.DeleteExpiredTaskAfter == :
settings[] = False
elif def_set.DeleteExpiredTaskAfter == :
settings[] =
else:
settings[] = _reverse_lookup(duration, def_set.DeleteExpiredTaskAfter)
if def_set.ExecutionTimeLimit == :
settings[] = False
else:
settings[] = _reverse_lookup(duration, def_set.ExecutionTimeLimit)
settings[] = _reverse_lookup(instances, def_set.MultipleInstances)
if def_set.RestartInterval == :
settings[] = False
else:
settings[] = _reverse_lookup(duration, def_set.RestartInterval)
if settings[]:
settings[] = def_set.RestartCount
settings[] = def_set.StopIfGoingOnBatteries
settings[] = def_set.WakeToRun
conditions = {}
conditions[] = def_set.DisallowStartIfOnBatteries
conditions[] = def_set.RunOnlyIfIdle
conditions[] = def_set.RunOnlyIfNetworkAvailable
conditions[] = def_set.StartWhenAvailable
if conditions[]:
idle_set = def_set.IdleSettings
conditions[] = idle_set.IdleDuration
conditions[] = idle_set.RestartOnIdle
conditions[] = idle_set.StopOnIdleEnd
conditions[] = idle_set.WaitTimeout
if conditions[]:
net_set = def_set.NetworkSettings
conditions[] = net_set.Id
conditions[] = net_set.Name
actions = []
for actionObj in task.Definition.Actions:
action = {}
action[] = _reverse_lookup(action_types, actionObj.Type)
if actionObj.Path:
action[] = actionObj.Path
if actionObj.Arguments:
action[] = actionObj.Arguments
if actionObj.WorkingDirectory:
action[] = actionObj.WorkingDirectory
actions.append(action)
triggers = []
for triggerObj in task.Definition.Triggers:
trigger = {}
trigger[] = _reverse_lookup(trigger_types, triggerObj.Type)
if triggerObj.ExecutionTimeLimit:
trigger[] = _reverse_lookup(duration, triggerObj.ExecutionTimeLimit)
if triggerObj.StartBoundary:
start_date, start_time = triggerObj.StartBoundary.split(, 1)
trigger[] = start_date
trigger[] = start_time
if triggerObj.EndBoundary:
end_date, end_time = triggerObj.EndBoundary.split(, 1)
trigger[] = end_date
trigger[] = end_time
trigger[] = triggerObj.Enabled
if hasattr(triggerObj, ):
if triggerObj.RandomDelay:
trigger[] = _reverse_lookup(duration, triggerObj.RandomDelay)
else:
trigger[] = False
if hasattr(triggerObj, ):
if triggerObj.Delay:
trigger[] = _reverse_lookup(duration, triggerObj.Delay)
else:
trigger[] = False
triggers.append(trigger)
properties[] = settings
properties[] = conditions
properties[] = actions
properties[] = triggers
ret = properties
return ret
|
r'''
Get the details about a task in the task scheduler.
:param str name: The name of the task for which to return the status
:param str location: A string value representing the location of the task.
Default is '\\' which is the root for the task scheduler
(C:\Windows\System32\tasks).
:return:
:rtype: dict
CLI Example:
.. code-block:: bash
salt 'minion-id' task.info <task_name>
|
374,077
|
def fetch_withdrawals(self, limit: int) -> List[Withdrawal]:
return self._transactions(self._withdrawals, , limit)
|
Fetch latest withdrawals, must provide a limit.
|
374,078
|
def set_value(self, value, layer=None, source=None):
    """Set a value for a particular layer, with optional source metadata.

    Parameters
    ----------
    value : str
        Data to store in the node.
    layer : str
        Name of the layer to use. If None, the outermost layer is used.
    source : str
        Metadata indicating the source of this value (e.g. a file path).

    Raises
    ------
    TypeError
        If the node is frozen.
    """
    if self._frozen:
        raise TypeError()
    # Fall back to the outermost (last) layer when none is given.
    target = layer or self._layers[-1]
    self._values[target] = (source, value)
|
Set a value for a particular layer with optional metadata about source.
Parameters
----------
value : str
Data to store in the node.
layer : str
Name of the layer to use. If None then the outermost where the value
exists will be used.
source : str
Metadata indicating the source of this value (e.g. a file path)
Raises
------
TypeError
If the node is frozen
KeyError
If the named layer does not exist
|
374,079
|
def brief_exception_text(exception, secret_values):
exception_text = _hide_secret_values(str(exception), secret_values)
return .format(type(exception).__name__, exception_text)
|
Returns the Exception class and the message of the exception as string.
:param exception: The exception to format
:param secret_values: Values to hide in output
|
374,080
|
def json_path_components(path):
    """Convert a JSON path to its individual path components.

    :param path: JSON path, which can be either an iterable of path
        components or a dot-separated string
    :return: A list of path components
    """
    if isinstance(path, str):
        # Fix: the separator literal was dropped; per the documented
        # contract, string paths are dot-separated.
        path = path.split('.')
    return list(path)
|
Convert JSON path to individual path components.
:param path: JSON path, which can be either an iterable of path
components or a dot-separated string
:return: A list of path components
|
374,081
|
def add(self, variant, arch, image):
if arch not in productmd.common.RPM_ARCHES:
raise ValueError("Arch not found in RPM_ARCHES: %s" % arch)
if arch in ["src", "nosrc"]:
raise ValueError("Source arch is not allowed. Map source files under binary arches.")
if self.header.version_tuple >= (1, 1):
for checkvar in self.images:
for checkarch in self.images[checkvar]:
for curimg in self.images[checkvar][checkarch]:
if identify_image(curimg) == identify_image(image) and curimg.checksums != image.checksums:
raise ValueError("Image {0} shares all UNIQUE_IMAGE_ATTRIBUTES with "
"image {1}! This is forbidden.".format(image, curimg))
self.images.setdefault(variant, {}).setdefault(arch, set()).add(image)
|
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
|
374,082
|
def remove_adapter(widget_class, flavour=None):
    """Remove the given widget class information from the default set of
    adapters.

    Only *one* adapter for the given widget_class (the first found in
    order) is removed, even if many are currently stored; any adapter it
    had shadowed becomes active again.

    @param flavour has to be used when the entry was added with a
    particular flavour.

    Returns True if one adapter was removed, False otherwise.
    """
    for index, entry in enumerate(__def_adapter):
        if widget_class == entry[WIDGET] and flavour == entry[FLAVOUR]:
            del __def_adapter[index]
            return True
    return False
|
Removes the given widget class information from the default set
of adapters.
If widget_class had been previously added by using add_adapter,
the added adapter will be removed, restoring possibly previusly
existing adapter(s). Notice that this function will remove only
*one* adapter about given wiget_class (the first found in order),
even if many are currently stored.
@param flavour has to be used when the entry was added with a
particular flavour.
Returns True if one adapter was removed, False if no adapter was
removed.
|
374,083
|
def createmergerequest(self, project_id, sourcebranch, targetbranch,
title, target_project_id=None, assignee_id=None):
data = {
: sourcebranch,
: targetbranch,
: title,
: assignee_id,
: target_project_id
}
request = requests.post(
.format(self.projects_url, project_id),
data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return request.json()
else:
return False
|
Create a new merge request.
:param project_id: ID of the project originating the merge request
:param sourcebranch: name of the branch to merge from
:param targetbranch: name of the branch to merge to
:param title: Title of the merge request
:param assignee_id: Assignee user ID
:return: dict of the new merge request
|
374,084
|
def clear_priority(self):
if (self.get_priority_metadata().is_read_only() or
self.get_priority_metadata().is_required()):
raise errors.NoAccess()
self._my_map[] = self._priority_default
|
Removes the priority.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
|
374,085
|
def _render(self, contexts, partials):
value = self._lookup(self.value, contexts)
if callable(value):
value = inner_render(str(value()), contexts, partials)
return self._escape(value)
|
render variable
|
374,086
|
def items(self):
logger.debug()
return [(key, self.get(key)) for key in self.keys()]
|
Return list of tuples of keys and values in db
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc.items()
[('l0', ['1', '2', '3', '4'])]
>>> dc.clear()
:return: list of (key, value) pairs
:rtype: list of tuple
|
374,087
|
def parse_file(file):
    """Take an open file containing the IANA subtag registry, and yield a
    dictionary of information for each subtag it describes.

    Lines are accumulated (with continuation lines folded into the
    previous entry) and then handed to parse_item.
    """
    lines = []
    for line in file:
        line = line.rstrip()
        # NOTE(review): the comparison literal was lost in extraction --
        # presumably this branch detects continuation lines and appends
        # them (minus a leading marker) to the previous line; confirm
        # against the original source.
        if line == :
            lines[-1] += line[1:]
        else:
            lines.append(line)
    yield from parse_item(lines)
|
Take an open file containing the IANA subtag registry, and yield a
dictionary of information for each subtag it describes.
|
374,088
|
def is_tuple_end(self, extra_end_rules=None):
if self.stream.current.type in (, , ):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
|
Are we at the end of a tuple?
|
374,089
|
def textFileStream(self, directory, process_all=False):
    """Monitor a directory and process all text files.

    File names starting with ``.`` are ignored.

    :param string directory: a path
    :param bool process_all: whether to process pre-existing files
    :rtype: DStream

    .. warning::
        The ``process_all`` parameter does not exist in the PySpark API.
    """
    stream = FileStream(directory, process_all)
    # Ensure the file monitor is shut down when this context stops.
    self._on_stop_cb.append(stream.stop)
    return DStream(stream, self, FileTextStreamDeserializer(self._context))
|
Monitor a directory and process all text files.
File names starting with ``.`` are ignored.
:param string directory: a path
:param bool process_all: whether to process pre-existing files
:rtype: DStream
.. warning::
The ``process_all`` parameter does not exist in the PySpark API.
|
374,090
|
def set_glitch_filter(self, user_gpio, steady):
res = yield from self._pigpio_aio_command(_PI_CMD_FG, user_gpio, steady)
return _u2i(res)
|
Sets a glitch filter on a GPIO.
Level changes on the GPIO are not reported unless the level
has been stable for at least [*steady*] microseconds. The
level is then reported. Level changes of less than [*steady*]
microseconds are ignored.
user_gpio:= 0-31
steady:= 0-300000
Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.
This filter affects the GPIO samples returned to callbacks set up
with [*callback*] and [*wait_for_edge*].
It does not affect levels read by [*read*],
[*read_bank_1*], or [*read_bank_2*].
Each (stable) edge will be timestamped [*steady*]
microseconds after it was first detected.
...
pi.set_glitch_filter(23, 100)
...
|
374,091
|
def cmd_list(self, argv, help):
parser = argparse.ArgumentParser(
prog="%s list" % self.progname,
description=help,
)
parser.add_argument("list", nargs=1,
metavar="listname",
help="Name of list to show.",
choices=sorted(self.list_cmds))
parser.add_argument("listopts",
metavar="...",
nargs=argparse.REMAINDER,
help="list command options")
args = parser.parse_args(argv)
for name, func in sorted(self.list_cmds[args.list[0]]):
func(args.listopts, func.__doc__)
|
Return a list of various things
|
374,092
|
def set_default_headers(context):
    """Register the context's row table as the default request headers.

    :type context: behave.runner.Context
    """
    headers = row_table(context)
    # requestsdefaulter expects a zero-argument callable producing headers.
    requestsdefaulter.default_headers(lambda: headers)
|
:type context: behave.runner.Context
|
374,093
|
def collate_revs(old, new, key=lambda x: x, merge=lambda old, new: new):
    """Collate revision sets *old* and *new* into one stable sequence.

    All items from both sets are yielded: old items first, new items
    last, and matching items (by *key*, identity by default) merged via
    *merge* (which defaults to preferring the new item). Both inputs are
    fully materialized in memory.
    """
    # Sentinel marking "no counterpart exists on the other side".
    missing = object()
    def maybe_merge(*items):
        # Merge whichever of the supplied items are actually present.
        def not_missing(ob):
            return ob is not missing
        return functools.reduce(merge, filter(not_missing, items))
    new_items = collections.OrderedDict(
        (key(el), el)
        for el in new
    )
    old_items = collections.OrderedDict(
        (key(el), el)
        for el in old
    )
    for old_key, old_item in _mutable_iter(old_items):
        if old_key not in new_items:
            # No new counterpart: emit the old item unchanged.
            yield old_item
            continue
        # Split the remaining new items around the matching key; anything
        # before the match is emitted now, merged with its old twin when
        # one exists (and removed from old_items so it isn't re-emitted).
        before, match_new, new_items = _swap_on_miss(
            partition_dict(new_items, old_key))
        for new_key, new_item in before.items():
            yield maybe_merge(new_item, old_items.pop(new_key, missing))
        yield merge(old_item, match_new)
    # Whatever is left in new_items had no old counterpart.
    for item in new_items.values():
        yield item
|
Given revision sets old and new, each containing a series
of revisions of some set of objects, collate them based on
these rules:
- all items from each set are yielded in stable order
- items in old are yielded first
- items in new are yielded last
- items that match are yielded in the order in which they
appear, giving preference to new
Items match based on the 'key' parameter (identity by default).
Items are merged using the 'merge' function, which accepts the old
and new items to be merged (returning new by default).
This algorithm requires fully materializing both old and new in memory.
>>> rev1 = ['a', 'b', 'c']
>>> rev2 = ['a', 'd', 'c']
>>> result = list(collate_revs(rev1, rev2))
'd' must appear before 'c'
>>> result.index('d') < result.index('c')
True
'b' must appear before 'd' because it came chronologically
first.
>>> result.index('b') < result.index('d')
True
>>> result
['a', 'b', 'd', 'c']
>>> list(collate_revs(['a', 'b', 'c'], ['d']))
['a', 'b', 'c', 'd']
>>> list(collate_revs(['b', 'a'], ['a', 'b']))
['a', 'b']
>>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
['a', 'b', 'c']
Given two sequences of things out of order, regardless
of which order in which the items are merged, all
keys should always be merged.
>>> from more_itertools import consume
>>> left_items = ['a', 'b', 'c']
>>> right_items = ['a', 'c', 'b']
>>> consume(collate_revs(left_items, right_items, merge=print))
a a
c c
b b
>>> consume(collate_revs(right_items, left_items, merge=print))
a a
b b
c c
The merge should not suppress non-True items:
>>> consume(collate_revs([0, 1, 2, None, ''], [0, None, ''], merge=print))
None None
<BLANKLINE>
0 0
|
374,094
|
def _update_history(self):
    """Inject a new version section at the top of the history file.

    Builds a header for the current version, underlined with the same
    character used by the file's existing first heading (falling back
    when none is found), and writes the result back to disk.

    NOTE(review): several string literals in this block appear to have
    been stripped during extraction (empty subscripts, bare ``=``,
    empty ``.format``/``open`` arguments); the original keys and
    separators must be restored before this code can run.
    """
    version = self.data[]
    history = self.vcs.history_file()
    if not history:
        # No history file tracked by the VCS: nothing to update.
        logger.warn("No history file found")
        return
    history_lines = open(history).read().split()
    headings = utils.extract_headings_from_history(history_lines)
    if not len(headings):
        # No prior headings: inject at the very top of the file.
        logger.warn("No detectable existing version headings in the "
                    "history file.")
        inject_location = 0
        underline_char =
    else:
        # Reuse the position and underline style of the first heading.
        first = headings[0]
        inject_location = first[]
        underline_line = first[] + 1
        try:
            underline_char = history_lines[underline_line][0]
        except IndexError:
            # Heading exists but has no underline line beneath it.
            logger.debug("No character on line below header.")
            underline_char =
    header = % version
    # New section: header, matching-length underline, then the body.
    inject = [header,
              underline_char * len(header),
              ,
              self.data[],
              ,
              ]
    history_lines[inject_location:inject_location] = inject
    contents = .join(history_lines)
    open(history, ).write(contents)
    logger.info("Injected new section into the history: %r", header)
|
Update the history file
|
374,095
|
def add(name, **kwargs):
    """Add a local Windows group via ADSI.

    Args:
        name (str): The name of the group to add.

    Returns:
        bool: ``True`` if the group was created, ``False`` if it
        already exists or the COM call failed.

    NOTE(review): the stray ``*`` line and the empty string literals
    below (``Create(, name)``, ``log.info(, name)``, ``.format(``)
    look like content stripped during extraction — restore before use.
    """
    *
    if not info(name):
        comp_obj = _get_computer_object()
        try:
            new_group = comp_obj.Create(, name)
            new_group.SetInfo()
            log.info(, name)
        except pywintypes.com_error as exc:
            # Surface the COM error text via the Win32 message table.
            msg = .format(
                name, win32api.FormatMessage(exc.excepinfo[5]))
            log.error(msg)
            return False
    else:
        log.warning(, name)
        return False
    return True
|
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
|
374,096
|
def get_metadata(self):
    """Provide metadata about this image, building it lazily.

    :return: ImageMetadata, Image metadata instance
    """
    if self._metadata is not None:
        return self._metadata
    # Cache first, then populate from a fresh inspect, matching the
    # original assignment order.
    self._metadata = ImageMetadata()
    inspect_to_metadata(self._metadata, self.inspect(refresh=True))
    return self._metadata
|
Provide metadata about this image.
:return: ImageMetadata, Image metadata instance
|
374,097
|
def LeaseCronJobs(self, cronjob_ids=None, lease_time=None):
    """Leases all available cron jobs."""
    acquired = []
    now = rdfvalue.RDFDatetime.Now()
    lease_until = now + lease_time
    for candidate in itervalues(self.cronjobs):
        if cronjob_ids and candidate.cron_job_id not in cronjob_ids:
            continue
        current_lease = self.cronjob_leases.get(candidate.cron_job_id)
        # Skip jobs whose lease is still held (not yet expired).
        if current_lease is not None and current_lease[0] >= now:
            continue
        self.cronjob_leases[candidate.cron_job_id] = (
            lease_until, utils.ProcessIdString())
        leased = candidate.Copy()
        leased.leased_until, leased.leased_by = (
            self.cronjob_leases[candidate.cron_job_id])
        acquired.append(leased)
    return acquired
|
Leases all available cron jobs.
|
374,098
|
def tool_click(self, evt):
    """Event handler tool selection (just add to default handler)"""
    factory = self.menu_ctrl_map[evt.GetId()]
    if self.inspector.selected_obj:
        parent = self.inspector.selected_obj
        # Climb until we reach something that accepts drops (or root).
        while parent.drop_target is None and parent.get_parent():
            parent = parent.get_parent()
    new_obj = factory(parent,
                      name="%s_%s" % (factory._meta.name.lower(), wx.NewId()),
                      pos=(0, 0), designer=self.designer)
    if new_obj._meta.container:
        # Containers get their own drop target so children can land.
        new_obj.drop_target = ToolBoxDropTarget(new_obj,
                                                self.inspector.root_obj,
                                                designer=self.designer,
                                                inspector=self.inspector)
    width, height = new_obj.size
    # Give degenerate (near-zero) controls a sensible minimum size.
    if width <= 10:
        new_obj.width = 100
    if height <= 10:
        new_obj.height = 20
    if self.inspector:
        self.inspector.load_object(self.inspector.root_obj)
        self.inspector.inspect(new_obj)
|
Event handler tool selection (just add to default handler)
|
374,099
|
def get_revision():
    """Return the hash of the current (most recent) git commit."""
    proc = Process("git log", ["git", "log", "-1"])
    try:
        while True:
            text = proc.stdout.pop().strip().decode()
            if not text:
                continue
            if text.startswith("commit "):
                return text[len("commit "):]
    finally:
        # Best-effort cleanup of the child process.
        with suppress_exception:
            proc.join()
|
Get the current git revision (hash of the most recent commit).
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.