Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
377,100
|
def match_column_labels(self, match_value_or_fct, levels=None, max_matches=0, empty_res=1):
allmatches = self.parent._find_column_label_positions(match_value_or_fct, levels)
matches = [m for m in allmatches if m in self.col_ilocs]
if max_matches and len(matches) > max_matches:
matches = matches[:max_matches]
if matches:
return RegionFormatter(self.parent, self.row_ilocs, pd.Int64Index(matches))
elif empty_res:
return self.empty_frame()
|
Check the original DataFrame's column labels to find a subset of the current region
:param match_value_or_fct: value or function(hdr_value) which returns True for match
:param levels: [None, scalar, indexer]
:param max_matches: maximum number of columns to return
:return:
|
377,101
|
def begin(self, *args, **kwargs):
self._transaction = True
try:
begin = self._con.begin
except AttributeError:
pass
else:
begin(*args, **kwargs)
|
Indicate the beginning of a transaction.
During a transaction, connections won't be transparently
replaced, and all errors will be raised to the application.
If the underlying driver supports this method, it will be called
with the given parameters (e.g. for distributed transactions).
|
377,102
|
def run(self, records):
self_name = type(self).__name__
for i, batch in enumerate(grouper(records, self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info(, self_name, i)
try:
for j, proc_batch in enumerate(grouper(
process_records(batch).iteritems(), self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info(, self_name, j, i)
self.upload_records({k: v for k, v in proc_batch}, from_queue=True)
except Exception:
self.logger.exception(, self_name)
return
self.logger.info(, self_name, i)
self.processed_records(batch)
self.logger.info(, self_name)
|
Runs the batch upload
:param records: an iterable containing queue entries
|
377,103
|
def get_logging_file_handler(logger=None, file=None, formatter=LOGGING_DEFAULT_FORMATTER):
logger = LOGGER if logger is None else logger
file = tempfile.NamedTemporaryFile().name if file is None else file
logging_file_handler = logging.FileHandler(file)
logging_file_handler.setFormatter(formatter)
logger.addHandler(logging_file_handler)
return logging_file_handler
|
Adds a logging file handler to given logger or default logger using given file.
:param logger: Logger to add the handler to.
:type logger: Logger
:param file: File to verbose into.
:type file: unicode
:param formatter: Handler formatter.
:type formatter: Formatter
:return: Added handler.
:rtype: Handler
|
377,104
|
def absent(
name,
region=None,
key=None,
keyid=None,
profile=None,
):
ret = {: name, : True, : , : {}}
r = __salt__[](
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if in r:
ret[] = False
ret[] = six.text_type(r[])
return ret
if not r[]:
ret[] = .format(
name,
region,
)
return ret
if __opts__[]:
ret[] = None
ret[] = .format(name)
ret[] = {: name, : None}
return ret
r = __salt__[](
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if in r:
ret[] = False
ret[] = six.text_type(r[])
return ret
ret[] = .format(name)
ret[][] = name
ret[][] = None
return ret
|
Ensure the named sqs queue is deleted.
name
Name of the SQS queue.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
|
377,105
|
def _validate_string(self, input_string, path_to_root, object_title=):
rules_path_to_root = re.sub(, , path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
: object_title,
: self.schema,
: input_criteria,
: ,
: path_to_root,
: input_string,
: 4001
}
if in input_criteria.keys():
if input_criteria[]:
error_dict[] =
error_dict[] = 4011
try:
decoded_bytes = b64decode(input_string)
except:
raise InputValidationError(error_dict)
if not isinstance(decoded_bytes, bytes):
raise InputValidationError(error_dict)
if in input_criteria.keys():
if input_string < input_criteria[]:
error_dict[] =
error_dict[] = 4022
raise InputValidationError(error_dict)
if in input_criteria.keys():
if input_string > input_criteria[]:
error_dict[] =
error_dict[] = 4023
raise InputValidationError(error_dict)
if in input_criteria.keys():
if input_string <= input_criteria[]:
error_dict[] =
error_dict[] = 4024
raise InputValidationError(error_dict)
if in input_criteria.keys():
if input_string >= input_criteria[]:
error_dict[] =
error_dict[] = 4025
raise InputValidationError(error_dict)
if in input_criteria.keys():
if input_string != input_criteria[]:
error_dict[] =
error_dict[] = 4026
raise InputValidationError(error_dict)
if in input_criteria.keys():
if len(input_string) < input_criteria[]:
error_dict[] =
error_dict[] = 4012
raise InputValidationError(error_dict)
if in input_criteria.keys():
if len(input_string) > input_criteria[]:
error_dict[] =
error_dict[] = 4013
raise InputValidationError(error_dict)
if in input_criteria.keys():
for regex in input_criteria[]:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
error_dict[] =
error_dict[] = 4014
raise InputValidationError(error_dict)
if in input_criteria.keys():
for regex in input_criteria[]:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(input_string):
error_dict[] =
error_dict[] = 4015
raise InputValidationError(error_dict)
if in input_criteria.keys():
regex_match = False
for regex in input_criteria[]:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
regex_match = True
if not regex_match:
error_dict[] =
error_dict[] = 4016
raise InputValidationError(error_dict)
if in input_criteria.keys():
if input_string not in input_criteria[]:
error_dict[] =
error_dict[] = 4041
raise InputValidationError(error_dict)
if in input_criteria.keys():
if input_string in input_criteria[]:
error_dict[] =
error_dict[] = 4042
raise InputValidationError(error_dict)
return input_string
|
a helper method for validating properties of a string
:return: input_string
|
377,106
|
def savePkeyPem(self, pkey, path):
with s_common.genfile(path) as fd:
fd.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
|
Save a private key in PEM format to a file outside the certdir.
|
377,107
|
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
do_assert(backend in ["matplotlib", "cv2"], "Expected backend or , got %s." % (backend,))
if backend == "cv2":
image_bgr = image
if image.ndim == 3 and image.shape[2] in [3, 4]:
image_bgr = image[..., 0:3][..., ::-1]
win_name = "imgaug-default-window"
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
cv2.imshow(win_name, image_bgr)
cv2.waitKey(0)
cv2.destroyWindow(win_name)
else:
import matplotlib.pyplot as plt
dpi = 96
h, w = image.shape[0] / dpi, image.shape[1] / dpi
w = max(w, 6)
fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)
fig.canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
ax.imshow(image, cmap="gray")
plt.show()
|
Shows an image in a window.
dtype support::
* ``uint8``: yes; not tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
image : (H,W,3) ndarray
Image to show.
backend : {'matplotlib', 'cv2'}, optional
Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
OpenCV tends to be faster, but apparently causes more technical issues.
|
377,108
|
def describe_role(name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_role(name)
if not info:
return False
role = info.get_role_response.get_role_result.role
role[] = salt.utils.json.loads(_unquote(
role.assume_role_policy_document
))
return False
|
Get information for a role.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.describe_role myirole
|
377,109
|
def _get_view_method(self, request):
if hasattr(self, ):
return self.action if self.action else None
return request.method.lower()
|
Get view method.
|
377,110
|
def get_arr_desc(arr):
type_ = type(arr).__name__
shape = getattr(arr, , None)
if shape is not None:
desc =
else:
desc =
return desc.format(type_=type_, shape=shape)
|
Get array description, in the form '<array type> <array shape>
|
377,111
|
def coinc(self, s0, s1, slide, step):
rstat = s0[]**2. + s1[]**2.
cstat = rstat + 2. * self.logsignalrate(s0, s1, slide, step)
cstat[cstat < 0] = 0
return cstat ** 0.5
|
Calculate the coincident detection statistic.
Parameters
----------
s0: numpy.ndarray
Single detector ranking statistic for the first detector.
s1: numpy.ndarray
Single detector ranking statistic for the second detector.
slide: numpy.ndarray
Array of ints. These represent the multiple of the timeslide
interval to bring a pair of single detector triggers into coincidence.
step: float
The timeslide interval in seconds.
Returns
-------
coinc_stat: numpy.ndarray
An array of the coincident ranking statistic values
|
377,112
|
def put(self, obj):
self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
if obj is _SHUTDOWNREQUEST:
return
|
Put request into queue.
Args:
obj (cheroot.server.HTTPConnection): HTTP connection
waiting to be processed
|
377,113
|
def copy(self, filename=None):
dst = os.path.join(self.dst_path, filename)
src = os.path.join(self.src_path, filename)
dst_tmp = os.path.join(self.dst_tmp, filename)
self.put(src=src, dst=dst_tmp, callback=self.update_progress, confirm=True)
self.rename(src=dst_tmp, dst=dst)
|
Puts on destination as a temp file, renames on
the destination.
|
377,114
|
def put(self, pid, record):
try:
ids = [data[] for data in json.loads(
request.data.decode())]
except KeyError:
raise WrongFile()
record.files.sort_by(*ids)
record.commit()
db.session.commit()
return self.make_response(obj=record.files, pid=pid, record=record)
|
Handle the sort of the files through the PUT deposit files.
Expected input in body PUT:
.. code-block:: javascript
[
{
"id": 1
},
{
"id": 2
},
...
}
Permission required: `update_permission_factory`.
:param pid: Pid object (from url).
:param record: Record object resolved from the pid.
:returns: The files.
|
377,115
|
def unperturbed_hamiltonian(states):
r
Ne = len(states)
H0 = np.zeros((Ne, Ne), complex)
for i in range(Ne):
H0[i, i] = hbar*states[i].omega
return H0
|
r"""Return the unperturbed atomic hamiltonian for given states.
We calcualte the atomic hamiltonian in the basis of the ground states of \
rubidium 87 (in GHz).
>>> g = State("Rb", 87, 5, 0, 1/Integer(2))
>>> magnetic_states = make_list_of_states([g], "magnetic")
>>> print(np.diag(unperturbed_hamiltonian(magnetic_states))/hbar/2/pi*1e-9)
[-4.2717+0.j -4.2717+0.j -4.2717+0.j 2.563 +0.j 2.563 +0.j 2.563 +0.j
2.563 +0.j 2.563 +0.j]
|
377,116
|
def __cost(self, params, phase, X):
params = self.__roll(params)
a = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
calculated_a = [a]
calculated_z = [0]
for i, theta in enumerate(params):
z = calculated_a[-1] * theta.transpose()
a = np.concatenate((np.ones((a.shape[0], 1)), a), axis=1)
calculated_a.append(a)
if phase == 0:
if self.__num_labels > 1:
return np.argmax(calculated_a[-1], axis=1)
return np.round(calculated_a[-1])
J = np.sum(-np.multiply(self.__y, np.log(calculated_a[-1]))-np.multiply(1-self.__y, np.log(1-calculated_a[-1])))/self.__m;
if self.__lambda != 0:
d = calculated_a[-1] - self.__y
else:
d = np.multiply(reversed_d[-1]*params[-i][:,1:], self.sigmoid_grad(calculated_z[-1-i]))
reversed_d.append(d)
theta_grad = reversed_d[-1].transpose() * calculated_a[-i-2] / self.__m
if self.__lambda != 0:
theta_grad += np.concatenate((np.zeros((params[-1-i].shape[0], 1)), params[-1-i][:,1:]), axis=1) * self.__lambda / self.__m
reversed_theta_grad.append(theta_grad)
theta_grad = self.__unroll(reversed(reversed_theta_grad))
return theta_grad
|
Computes activation, cost function, and derivative.
|
377,117
|
def command_for_func(func):
class FuncCommand(BaseCommand):
def run(self):
func()
update_package_data(self.distribution)
return FuncCommand
|
Create a command that calls the given function.
|
377,118
|
def evaluateplanarR2derivs(Pot,R,phi=None,t=0.):
from .Potential import _isNonAxi
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) planarPotential instances is non-axisymmetric, but you did not provide phi")
if isinstance(Pot,list) \
and nu.all([isinstance(p,planarPotential) for p in Pot]):
sum= 0.
for pot in Pot:
if nonAxi:
sum+= pot.R2deriv(R,phi=phi,t=t,use_physical=False)
else:
sum+= pot.R2deriv(R,t=t,use_physical=False)
return sum
elif isinstance(Pot,planarPotential):
if nonAxi:
return Pot.R2deriv(R,phi=phi,t=t,use_physical=False)
else:
return Pot.R2deriv(R,t=t,use_physical=False)
else:
raise PotentialError("Input to is neither a Potential-instance or a list of such instances")
|
NAME:
evaluateplanarR2derivs
PURPOSE:
evaluate the second radial derivative of a (list of) planarPotential instance(s)
INPUT:
Pot - (list of) planarPotential instance(s)
R - Cylindrical radius (can be Quantity)
phi= azimuth (optional; can be Quantity)
t= time (optional; can be Quantity)
OUTPUT:
F_R(R(,phi,t))
HISTORY:
2010-10-09 - Written - Bovy (IAS)
|
377,119
|
def view_focused_activity(self) -> str:
output, _ = self._execute(
, self.device_sn, , , , )
return re.findall(r, output)[0]
|
View focused activity.
|
377,120
|
def movie(args):
p = OptionParser(movie.__doc__)
p.add_option("--gapsize", default=100, type="int",
help="Insert gaps of size between scaffolds")
add_allmaps_plot_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
inputbed, scaffoldsfasta, seqid = args
gapsize = opts.gapsize
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
tourfile = pf + ".tour"
fp = open(tourfile)
sizes = Sizes(scaffoldsfasta).mapping
ffmpeg = "ffmpeg"
mkdir(ffmpeg)
score = cur_score = None
i = 1
for header, block in read_block(fp, ">"):
s, tag, label = header[1:].split()
if s != seqid:
continue
tour = block[0].split()
tour = [(x[:-1], x[-1]) for x in tour]
if label.startswith("GA"):
cur_score = label.split("-")[-1]
if cur_score == score:
i += 1
continue
score = cur_score
image_name = ".".join((seqid, "{0:04d}".format(i), label, "pdf"))
if need_update(tourfile, image_name):
fwagp = must_open(agpfile, "w")
order_to_agp(seqid, tour, sizes, fwagp, gapsize=gapsize,
gaptype="map")
fwagp.close()
logging.debug("{0} written to `{1}`".format(header, agpfile))
build([inputbed, scaffoldsfasta, "--cleanup"])
pdf_name = plot([inputbed, seqid, "--title={0}".format(label)])
sh("mv {0} {1}".format(pdf_name, image_name))
if label in ("INIT", "FLIP", "TSP", "FINAL"):
for j in xrange(5):
image_delay = image_name.rsplit(".", 1)[0] + \
".d{0}.pdf".format(j)
sh("cp {0} {1}/{2}".format(image_name, ffmpeg, image_delay))
else:
sh("cp {0} {1}/".format(image_name, ffmpeg))
i += 1
make_movie(ffmpeg, pf)
|
%prog movie input.bed scaffolds.fasta chr1
Visualize history of scaffold OO. The history is contained within the
tourfile, generated by path(). For each historical scaffold OO, the program
plots a separate PDF file. The plots can be combined to show the progression
as a little animation. The third argument limits the plotting to a
specific pseudomolecule, for example `chr1`.
|
377,121
|
def order_transforms(transforms):
outputs = set().union(*[t.outputs for t in transforms])
out = []
remaining = [t for t in transforms]
while remaining:
leftover = []
for t in remaining:
if t.inputs.isdisjoint(outputs):
out.append(t)
outputs -= t.outputs
else:
leftover.append(t)
remaining = leftover
return out
|
Orders transforms to ensure proper chaining.
For example, if `transforms = [B, A, C]`, and `A` produces outputs needed
by `B`, the transforms will be re-rorderd to `[A, B, C]`.
Parameters
----------
transforms : list
List of transform instances to order.
Outputs
-------
list :
List of transformed ordered such that forward transforms can be carried
out without error.
|
377,122
|
def remove_option(self, section, name, value=None):
if self._is_live():
raise RuntimeError()
removed = 0
for option in list(self._data[]):
if value is None or option[] == value:
self._data[].remove(option)
removed += 1
if removed > 0:
return True
return False
|
Remove an option from a unit
Args:
section (str): The section to remove from.
name (str): The item to remove.
value (str, optional): If specified, only the option matching this value will be removed
If not specified, all options with ``name`` in ``section`` will be removed
Returns:
True: At least one item was removed
False: The item requested to remove was not found
|
377,123
|
def stub_request(self, expected_url, filename, status=None, body=None):
self.fake_web = True
self.faker = get_faker(expected_url, filename, status, body)
|
Stub a web request for testing.
|
377,124
|
def _write_packet(self, packet, sec=None, usec=None, caplen=None,
wirelen=None):
if hasattr(packet, "time"):
if sec is None:
sec = int(packet.time)
usec = int(round((packet.time - sec) *
(1000000000 if self.nano else 1000000)))
if usec is None:
usec = 0
rawpkt = raw(packet)
caplen = len(rawpkt) if caplen is None else caplen
if wirelen is None:
if hasattr(packet, "wirelen"):
wirelen = packet.wirelen
if wirelen is None:
wirelen = caplen
RawPcapWriter._write_packet(
self, rawpkt, sec=sec, usec=usec, caplen=caplen, wirelen=wirelen)
|
Writes a single packet to the pcap file.
:param packet: Packet, or bytes for a single packet
:type packet: Packet or bytes
:param sec: time the packet was captured, in seconds since epoch. If
not supplied, defaults to now.
:type sec: int or long
:param usec: If ``nano=True``, then number of nanoseconds after the
second that the packet was captured. If ``nano=False``,
then the number of microseconds after the second the
packet was captured. If ``sec`` is not specified,
this value is ignored.
:type usec: int or long
:param caplen: The length of the packet in the capture file. If not
specified, uses ``len(raw(packet))``.
:type caplen: int
:param wirelen: The length of the packet on the wire. If not
specified, tries ``packet.wirelen``, otherwise uses
``caplen``.
:type wirelen: int
:returns: None
:rtype: None
|
377,125
|
def conference_mute(self, call_params):
path = + self.api_version +
method =
return self.request(path, method, call_params)
|
REST Conference Mute helper
|
377,126
|
def get_window_forecasts(self):
for model_name in self.model_names:
self.window_forecasts[model_name] = {}
for size_threshold in self.size_thresholds:
self.window_forecasts[model_name][size_threshold] = \
np.array([self.raw_forecasts[model_name][size_threshold][sl].sum(axis=0)
for sl in self.hour_windows])
|
Aggregate the forecasts within the specified time windows.
|
377,127
|
def create_asset(self, ):
name = self.name_le.text()
if not name:
self.name_le.setPlaceholderText("Please enter a name!")
return
desc = self.desc_pte.toPlainText()
if not self.atype:
atypei = self.atype_cb.currentIndex()
assert atypei >= 0
self.atype = self.atypes[atypei]
try:
asset = djadapter.models.Asset(atype=self.atype, project=self.project, name=name, description=desc)
asset.save()
self.asset = asset
self.accept()
except:
log.exception("Could not create new asset")
|
Create a asset and store it in the self.asset
:returns: None
:rtype: None
:raises: None
|
377,128
|
def enable_global_typelogged_profiler(flag = True):
global global_typelogged_profiler, _global_type_agent, global_typechecked_profiler
global_typelogged_profiler = flag
if flag and typelogging_enabled:
if _global_type_agent is None:
_global_type_agent = TypeAgent()
_global_type_agent.start()
elif not _global_type_agent.active:
_global_type_agent.start()
elif not flag and not global_typechecked_profiler and \
not _global_type_agent is None and _global_type_agent.active:
_global_type_agent.stop()
|
Enables or disables global typelogging mode via a profiler.
See flag global_typelogged_profiler.
Does not work if typelogging_enabled is false.
|
377,129
|
def subscribeToDeviceCommands(self, typeId="+", deviceId="+", commandId="+", msgFormat="+"):
if self._config.isQuickstart():
self.logger.warning("QuickStart applications do not support commands")
return 0
topic = "iot-2/type/%s/id/%s/cmd/%s/fmt/%s" % (typeId, deviceId, commandId, msgFormat)
return self._subscribe(topic, 0)
|
Subscribe to device command messages
# Parameters
typeId (string): typeId for the subscription, optional. Defaults to all device types (MQTT `+` wildcard)
deviceId (string): deviceId for the subscription, optional. Defaults to all devices (MQTT `+` wildcard)
commandId (string): commandId for the subscription, optional. Defaults to all commands (MQTT `+` wildcard)
msgFormat (string): msgFormat for the subscription, optional. Defaults to all formats (MQTT `+` wildcard)
qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)
# Returns
int: If the subscription was successful then the return Message ID (mid) for the subscribe request
will be returned. The mid value can be used to track the subscribe request by checking against
the mid argument if you register a subscriptionCallback method.
If the subscription fails then the return value will be `0`
|
377,130
|
def remove(self, child):
try:
self.children.remove(child)
if isinstance(child, String):
child._parent = None
except ValueError:
pass
|
Remove a ``child`` from the list of :attr:`children`.
|
377,131
|
def visit_keyword(self, node):
if node.arg is None:
return "**%s" % node.value.accept(self)
return "%s=%s" % (node.arg, node.value.accept(self))
|
return an astroid.Keyword node as string
|
377,132
|
def setup(service_manager, conf, reload_method="reload"):
conf.register_opts(service_opts)
_load_service_manager_options(service_manager, conf)
def _service_manager_reload():
_configfile_reload(conf, reload_method)
_load_service_manager_options(service_manager, conf)
if os.name != "posix":
return
service_manager.register_hooks(
on_new_worker=functools.partial(
_new_worker_hook, conf, reload_method),
on_reload=_service_manager_reload)
|
Load services configuration from oslo config object.
It reads ServiceManager and Service configuration options from an
oslo_config.ConfigOpts() object. Also It registers a ServiceManager hook to
reload the configuration file on reload in the master process and in all
children. And then when each child start or reload, the configuration
options are logged if the oslo config option 'log_options' is True.
On children, the configuration file is reloaded before the running the
application reload method.
Options currently supported on ServiceManager and Service:
* graceful_shutdown_timeout
:param service_manager: ServiceManager instance
:type service_manager: cotyledon.ServiceManager
:param conf: Oslo Config object
:type conf: oslo_config.ConfigOpts()
:param reload_method: reload or mutate the config files
:type reload_method: str "reload/mutate"
|
377,133
|
def rts_smoother(cls,state_dim, p_dynamic_callables, filter_means,
filter_covars):
no_steps = filter_covars.shape[0]-1
M = np.empty(filter_means.shape)
P = np.empty(filter_covars.shape)
M[-1,:] = filter_means[-1,:]
P[-1,:,:] = filter_covars[-1,:,:]
for k in range(no_steps-1,-1,-1):
m_pred, P_pred, tmp1, tmp2 = \
cls._kalman_prediction_step(k, filter_means[k,:],
filter_covars[k,:,:], p_dynamic_callables,
calc_grad_log_likelihood=False)
p_m = filter_means[k,:]
if len(p_m.shape)<2:
p_m.shape = (p_m.shape[0],1)
p_m_prev_step = M[k+1,:]
if len(p_m_prev_step.shape)<2:
p_m_prev_step.shape = (p_m_prev_step.shape[0],1)
m_upd, P_upd, G_tmp = cls._rts_smoother_update_step(k,
p_m ,filter_covars[k,:,:],
m_pred, P_pred, p_m_prev_step ,P[k+1,:,:], p_dynamic_callables)
M[k,:] = m_upd
P[k,:,:] = P_upd
return (M, P)
|
This function implements Rauch–Tung–Striebel(RTS) smoother algorithm
based on the results of kalman_filter_raw.
These notations are the same:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k-1} ~ N(0, R_{k})
Returns estimated smoother distributions x_{k} ~ N(m_{k}, P(k))
Input:
--------------
p_a: function (k, x_{k-1}, A_{k}). Dynamic function.
k (iteration number), starts at 0
x_{k-1} State from the previous step
A_{k} Jacobian matrices of f_a. In the linear case it is exactly A_{k}.
p_f_A: function (k, m, P) return Jacobian of dynamic function, it is
passed into p_a.
k (iteration number), starts at 0
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
p_f_Q: function (k). Returns noise matrix of dynamic model on iteration k.
k (iteration number). starts at 0
filter_means: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Results of the Kalman Filter means estimation.
filter_covars: (no_steps+1, state_dim, state_dim) 3D array
Results of the Kalman Filter covariance estimation.
Output:
-------------
M: (no_steps+1, state_dim) matrix
Smoothed estimates of the state means
P: (no_steps+1, state_dim, state_dim) 3D array
Smoothed estimates of the state covariances
|
377,134
|
def runblast(self, assembly, allele, sample):
genome = os.path.split(assembly)[1].split()[0]
make_path(sample[self.analysistype].reportdir)
try:
report = glob(.format(sample[self.analysistype].reportdir, genome))[0]
size = os.path.getsize(report)
if size == 0:
os.remove(report)
report = .format(sample[self.analysistype].reportdir, genome,
time.strftime("%Y.%m.%d.%H.%M.%S"))
except IndexError:
report = .format(sample[self.analysistype].reportdir, genome,
time.strftime("%Y.%m.%d.%H.%M.%S"))
db = allele.split()[0]
blastn = NcbiblastnCommandline(query=assembly, db=db, evalue=, num_alignments=1000000,
num_threads=12,
outfmt="",
out=report)
sample[self.analysistype].blastcommand = str(blastn)
sample[self.analysistype].blastreport = report
if not os.path.isfile(report):
blastn()
self.blastparser(report, sample)
|
Run the BLAST analyses
:param assembly: assembly path/file
:param allele: combined allele file
:param sample: sample object
:return:
|
377,135
|
def job_status(job_id, show_job_key=False, ignore_auth=False):
s data
:statuscode 404: job id not found
:statuscode 409: an error occurred
errorjob_id not founderrornot authorizedapi_keyjob_keyapplication/json')
|
Show a specific job.
**Results:**
:rtype: A dictionary with the following keys
:param status: Status of job (complete, error)
:type status: string
:param sent_data: Input data for job
:type sent_data: json encodable data
:param job_id: An identifier for the job
:type job_id: string
:param result_url: Callback url
:type result_url: url string
:param data: Results from job.
:type data: json encodable data
:param error: Error raised during job execution
:type error: string
:param metadata: Metadata provided when submitting job.
:type metadata: list of key - value pairs
:param requested_timestamp: Time the job started
:type requested_timestamp: timestamp
:param finished_timestamp: Time the job finished
:type finished_timestamp: timestamp
:statuscode 200: no error
:statuscode 403: not authorized to view the job's data
:statuscode 404: job id not found
:statuscode 409: an error occurred
|
377,136
|
def read_config(self, correlation_id, parameters):
value = self._read_object(correlation_id, parameters)
return ConfigParams.from_value(value)
|
Reads configuration and parameterize it with given values.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameters the configuration or null to skip parameterization.
:return: ConfigParams configuration.
|
377,137
|
def register_callback_subscribed(self, callback):
return self.__client.register_callback_created(partial(self.__callback_subscribed_filter, callback),
serialised=False)
|
Register a callback for new subscription. This gets called whenever one of *your* things subscribes to something
else.
`Note` it is not called when whenever something else subscribes to your thing.
The payload passed to your callback is either a
[RemoteControl](RemotePoint.m.html#IoticAgent.IOT.RemotePoint.RemoteControl) or
[RemoteFeed](RemotePoint.m.html#IoticAgent.IOT.RemotePoint.RemoteFeed) instance.
|
377,138
|
def parse_line(self, line, lineno):
if not line.strip():
return
if self.state == self.STATES[] and self.RE_HEADER_LINE.match(line):
return
step_marker_match = self.RE_STEP_MARKER.match(line)
if not step_marker_match:
if self.state != self.STATES[]:
self.start_step(lineno)
self.sub_parser.parse_line(line, lineno)
return
if step_marker_match.group() == :
if self.state == self.STATES[]:
|
Parse a single line of the log.
We have to handle both buildbot style logs as well as Taskcluster logs. The latter
attempt to emulate the buildbot logs, but don't accurately do so, partly due
to the way logs are generated in Taskcluster (ie: on the workers themselves).
Buildbot logs:
builder: ...
slave: ...
starttime: ...
results: ...
buildid: ...
builduid: ...
revision: ...
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
Taskcluster logs (a worst-case example):
<log output outside a step>
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
<log output outside a step>
======= <step START marker> =======
<step log output with no following finish marker>
As can be seen above, Taskcluster logs can have (a) log output that falls between
step markers, and (b) content at the end of the log, that is not followed by a
final finish step marker. We handle this by creating generic placeholder steps to
hold the log output that is not enclosed by step markers, and then by cleaning up
the final step in finish_parse() once all lines have been parsed.
|
377,139
|
def _intersection_with_dsis(self, dsis):
new_si_set = set()
for si in dsis._si_set:
r = self._intersection_with_si(si)
if isinstance(r, StridedInterval):
if not r.is_empty:
new_si_set.add(r)
else:
new_si_set |= r._si_set
if len(new_si_set):
ret = DiscreteStridedIntervalSet(bits=self.bits, si_set=new_si_set)
return ret.normalize()
else:
return StridedInterval.empty(self.bits)
|
Intersection with another :class:`DiscreteStridedIntervalSet`.
:param dsis: The other operand.
:return:
|
377,140
|
def build_managers(app, conf):
default_options = _get_default_options(conf)
manager_descriptions = ManagerDescriptions()
if "job_managers_config" in conf:
job_managers_config = conf.get("job_managers_config", None)
_populate_manager_descriptions_from_ini(manager_descriptions, job_managers_config)
elif "managers" in conf:
for manager_name, manager_options in conf["managers"].items():
manager_description = ManagerDescription.from_dict(manager_options, manager_name)
manager_descriptions.add(manager_description)
elif "manager" in conf:
manager_description = ManagerDescription.from_dict(conf["manager"])
manager_descriptions.add(manager_description)
else:
manager_descriptions.add(ManagerDescription())
manager_classes = _get_managers_dict()
managers = {}
for manager_name, manager_description in manager_descriptions.descriptions.items():
manager_options = dict(default_options)
manager_options.update(manager_description.manager_options)
manager_class = manager_classes[manager_description.manager_type]
manager = _build_manager(manager_class, app, manager_name, manager_options)
managers[manager_name] = manager
return managers
|
Takes in a config file as outlined in job_managers.ini.sample and builds
a dictionary of job manager objects from them.
|
377,141
|
def _init_objaartall(self):
kws = {
:lambda nt: [nt.NS, nt.dcnt],
:(
),
:(
),
}
return AArtGeneProductSetsAll(self.grprdflt, self.hdrobj, **kws)
|
Get background database info for making ASCII art.
|
377,142
|
def get_program(name, config, ptype="cmd", default=None):
    """Retrieve program information from the configuration.

    Handles back-compatible location specification in the input YAML: the
    preferred location for program information is ``resources``, but the
    older ``program`` tag is also supported.

    :param name: program name to look up
    :param config: configuration dictionary (or a dict wrapping one under
        a ``config`` key)
    :param ptype: kind of information requested: ``"cmd"`` or ``"dir"``
    :param default: fallback command when none is configured
    :raises ValueError: if *ptype* is neither ``"cmd"`` nor ``"dir"``
    """
    # Unwrap an outer {"config": {...}} container if present.
    config = config.get("config", config)
    try:
        pconfig = config.get("resources", {})[name]
    except KeyError:
        pconfig = {}
    # Back-compatibility: fill in missing keys from the legacy "program" tag.
    old_config = config.get("program", {}).get(name, None)
    if old_config:
        for key in ["dir", "cmd"]:
            if key not in pconfig:
                pconfig[key] = old_config
    if ptype == "cmd":
        return _get_program_cmd(name, pconfig, config, default)
    elif ptype == "dir":
        return _get_program_dir(name, pconfig)
    else:
        raise ValueError("Don't understand program type: %s" % ptype)
|
Retrieve program information from the configuration.
This handles back compatible location specification in input
YAML. The preferred location for program information is in
`resources` but the older `program` tag is also supported.
|
377,143
|
def _get_error_message(response):
try:
data = response.json()
if "error_description" in data:
return data[]
if "error" in data:
return data[]
except Exception:
pass
return "Unknown error"
|
Attempt to extract an error message from response body
|
377,144
|
def p_annotation_spdx_id_1(self, p):
    """annotation_spdx_id : ANNOTATION_SPDX_ID LINE

    Record the SPDX identifier that the current annotation refers to.
    """
    try:
        if six.PY2:
            # NOTE(review): the encoding literal was stripped in this copy
            # -- presumably 'utf-8'; confirm against upstream.
            value = p[2].decode(encoding=)
        else:
            value = p[2]
        self.builder.set_annotation_spdx_id(self.document, value)
    except CardinalityError:
        # Only one SPDX id is allowed per annotation (the error-name
        # literal was stripped in this copy).
        self.more_than_one_error(, p.lineno(1))
    except OrderError:
        # An annotation must be started before its SPDX id appears
        # (the two name literals were stripped in this copy).
        self.order_error(, , p.lineno(1))
|
annotation_spdx_id : ANNOTATION_SPDX_ID LINE
|
377,145
|
def track_time(self, name, description=, max_rows=None):
    """Create a Timer object in the Tracker.

    :param name: unique table name; TableConflictError is raised if taken
    :param description: human-readable description of the timer
        (NOTE(review): the default literal was stripped in this copy,
        presumably '')
    :param max_rows: row cap for the backing table; tracker default if None
    """
    if name in self._tables:
        raise TableConflictError(name)
    if max_rows is None:
        max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE
    # NOTE(review): the table-type string argument was stripped in this copy.
    self.register_table(name, self.uuid, , description)
    self._tables[name] = Timer(name, self, max_rows=max_rows)
|
Create a Timer object in the Tracker.
|
377,146
|
def save_as_pil(self, fname, pixel_array=None):
    """Save the image from a numpy array using Pillow (PIL fork).

    :param fname: location and name of the image file to be saved
    :param pixel_array: numpy pixel array; defaults to ``self.numpy``
    :returns: True if successful
    """
    if pixel_array is None:
        pixel_array = self.numpy
    from PIL import Image as pillow
    # NOTE(review): the astype() dtype literal was stripped in this copy --
    # presumably 'uint8' for 8-bit image data; confirm against upstream.
    pil_image = pillow.fromarray(pixel_array.astype())
    pil_image.save(fname)
    return True
|
This method saves the image from a numpy array using Pillow
(PIL fork)
:param fname: Location and name of the image file to be saved.
:param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value
This method will return True if successful
|
377,147
|
def get_opener(self, protocol):
    """Get the opener instance associated with a given protocol.

    Arguments:
        protocol (str): A filesystem protocol; falls back to
            ``self.default_opener`` when falsy.

    Returns:
        Opener: an opener instance.

    Raises:
        UnsupportedProtocol: If no opener could be found for the given
            protocol.
        EntryPointError: If the entry point is not an `Opener` subclass
            or could not be loaded or instantiated.
    """
    protocol = protocol or self.default_opener
    if self.load_extern:
        # Externally registered openers take precedence over built-ins.
        entry_point = next(
            pkg_resources.iter_entry_points("fs.opener", protocol), None
        )
    else:
        entry_point = None
    if entry_point is None:
        if protocol in self._protocols:
            opener_instance = self._protocols[protocol]
        else:
            # Bug fix: the original format string had no placeholder, so
            # the offending protocol never appeared in the error message.
            raise UnsupportedProtocol(
                "protocol '{}' is not supported".format(protocol)
            )
    else:
        try:
            opener = entry_point.load()
        except Exception as exception:
            raise EntryPointError(
                "could not load entry point; {}".format(exception)
            )
        if not issubclass(opener, Opener):
            raise EntryPointError("entry point did not return an opener")
        try:
            opener_instance = opener()
        except Exception as exception:
            raise EntryPointError(
                "could not instantiate opener; {}".format(exception)
            )
    return opener_instance
|
Get the opener class associated to a given protocol.
Arguments:
protocol (str): A filesystem protocol.
Returns:
Opener: an opener instance.
Raises:
~fs.opener.errors.UnsupportedProtocol: If no opener
could be found for the given protocol.
EntryPointLoadingError: If the returned entry point
is not an `Opener` subclass or could not be loaded
successfully.
|
377,148
|
def delete_all_but(self, prefix, name):
    """Delete every timestamped index starting with *prefix*, except *name*.

    :param prefix: index must have this as a prefix and the remainder must
        be a DATE_TIME suffix (YYYYMMDD_hhmmss)
    :param name: the index with this exact name is not deleted
    :return:
    """
    if prefix == name:
        Log.note("{{index_name}} will not be deleted", {"index_name": prefix})
    timestamped = re.escape(prefix) + "\\d{8}_\\d{6}"
    for alias in self.get_aliases():
        if alias.index == name:
            continue
        if re.match(timestamped, alias.index):
            self.delete_index(alias.index)
|
:param prefix: INDEX MUST HAVE THIS AS A PREFIX AND THE REMAINDER MUST BE DATE_TIME
:param name: INDEX WITH THIS NAME IS NOT DELETED
:return:
|
377,149
|
def get_undefined_namespaces(graph: BELGraph) -> Set[str]:
    """Get all namespaces that are used in the BEL graph but aren't actually defined."""
    undefined = set()
    for _, exc, _ in graph.warnings:
        if isinstance(exc, UndefinedNamespaceWarning):
            undefined.add(exc.namespace)
    return undefined
|
Get all namespaces that are used in the BEL graph aren't actually defined.
|
377,150
|
def cons(self, i):
    """True iff b[i] is a consonant (Porter stemmer definition).

    Vowels are never consonants; 'y' is a consonant exactly when it is
    the first letter or follows another consonant.

    NOTE(review): the character literals were stripped in this copy and
    have been reconstructed from the standard Porter stemming algorithm
    ('aeiou' and 'y'); confirm against upstream.
    """
    if self.b[i] in 'aeiou':
        return False
    elif self.b[i] == 'y':
        # Leading 'y' is a consonant; otherwise 'y' alternates with
        # the classification of the preceding character.
        return True if i == 0 else not self.cons(i - 1)
    return True
|
True iff b[i] is a consonant
|
377,151
|
def load(ctx, variant_source, family_file, family_type, root):
    """Load a variant source into the puzzle database.

    If no database is found, ``puzzle init`` must be run first.
    1. VCF: a vcf file can be loaded together with a ped file.
    2. GEMINI: ped information is retrieved from the gemini db.

    NOTE(review): several lines of this function were garbled/lost in this
    copy -- ``variant_type``, ``mode`` and ``db_path`` are referenced below
    but their assignments are missing; recover from upstream.
    """
    # NOTE(review): the ctx.obj.get() key literal was stripped in this copy.
    root = root or ctx.obj.get() or os.path.expanduser("~/.puzzle")
    if os.path.isfile(root):
        # NOTE(review): this line is extraction residue of several merged
        # log/error messages; the original control flow here is lost.
        logger.error(" canpuzzle_db.sqlite3puzzle initunknowngeminiSet puzzle backend to {0}Set variant type to {0}'.format(variant_type))
    cases = get_cases(
        variant_source=variant_source,
        case_lines=family_file,
        case_type=family_type,
        variant_type=variant_type,
        variant_mode=mode
    )
    if len(cases) == 0:
        logger.warning("No cases found")
        ctx.abort()
    logger.info("Initializing sqlite plugin")
    store = SqlStore(db_path)
    for case_obj in cases:
        # Skip cases that are already present in the database.
        if store.case(case_obj.case_id) is not None:
            logger.warn("{} already exists in the database"
                        .format(case_obj.case_id))
            continue
        logger.debug("adding case: {} to puzzle db".format(case_obj.case_id))
        store.add_case(case_obj, vtype=variant_type, mode=mode)
|
Load a variant source into the database.
If no database was found run puzzle init first.
1. VCF: If a vcf file is used it can be loaded with a ped file
2. GEMINI: Ped information will be retrieved from the gemini db
|
377,152
|
def parse(text):
    """Parse the given text into metadata and strip it for a Markdown parser.

    Repeatedly matches the module-level META regex at the start of *text*,
    collecting key/value pairs until no more metadata lines remain.

    :param text: text to be parsed
    :returns: (metadata dict, remaining text)
    """
    rv = {}
    m = META.match(text)
    while m:
        key = m.group(1)
        value = m.group(2)
        # NOTE(review): the sub() replacement literal was stripped in this
        # copy -- presumably '' or ' ' to collapse indentation; confirm.
        value = INDENTATION.sub(, value.strip())
        rv[key] = value
        # Consume the matched metadata line and try the next one.
        text = text[len(m.group(0)):]
        m = META.match(text)
    return rv, text
|
Parse the given text into metadata and strip it for a Markdown parser.
:param text: text to be parsed
|
377,153
|
def allFileExists(fileList):
    """Check that all the files in ``fileList`` exist.

    :param fileList: the list of file paths to check.
    :type fileList: list
    :returns: True if every path is an existing regular file (True for an
        empty list).
    """
    # all() short-circuits on the first missing file, unlike the original
    # loop which statted every path even after a miss.
    return all(os.path.isfile(fileName) for fileName in fileList)
|
Check that all files exist.
:param fileList: the list of files to check.
:type fileList: list
Check whether all the files in ``fileList`` exist.
|
377,154
|
def rpc_name(rpc_id):
    """Map an RPC id to a string name.

    This function looks the RPC up in a map of all globally declared RPCs,
    and returns a nice name string. If the RPC is not found in the global
    name map, returns a generic name string such as 'rpc 0x1234'.

    Args:
        rpc_id (int): The id of the RPC that we wish to look up.

    Returns:
        str: The nice name of the RPC.
    """
    name = _RPC_NAME_MAP.get(rpc_id)
    if name is None:
        # Fallback format reconstructed from the documented behavior
        # ('rpc 0x%04X'); the literal was stripped in this copy.
        name = 'rpc 0x%04X' % rpc_id
    return name
|
Map an RPC id to a string name.
This function looks the RPC up in a map of all globally declared RPCs,
and returns a nice name string. if the RPC is not found in the global
name map, returns a generic name string such as 'rpc 0x%04X'.
Args:
rpc_id (int): The id of the RPC that we wish to look up.
Returns:
str: The nice name of the RPC.
|
377,155
|
def _handshake(self):
session_context = None
ssl_policy_ref = None
crl_search_ref = None
crl_policy_ref = None
ocsp_search_ref = None
ocsp_policy_ref = None
policy_array_ref = None
try:
if osx_version_info < (10, 8):
session_context_pointer = new(Security, )
result = Security.SSLNewContext(False, session_context_pointer)
handle_sec_error(result)
session_context = unwrap(session_context_pointer)
else:
session_context = Security.SSLCreateContext(
null(),
SecurityConst.kSSLClientSide,
SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
session_context,
_read_callback_pointer,
_write_callback_pointer
)
handle_sec_error(result)
self._connection_id = id(self) % 2147483647
_connection_refs[self._connection_id] = self
_socket_refs[self._connection_id] = self._socket
result = Security.SSLSetConnection(session_context, self._connection_id)
handle_sec_error(result)
utf8_domain = self._hostname.encode()
result = Security.SSLSetPeerDomainName(
session_context,
utf8_domain,
len(utf8_domain)
)
handle_sec_error(result)
if osx_version_info >= (10, 10):
disable_auto_validation = self._session._manual_validation or self._session._extra_trust_roots
explicit_validation = (not self._session._manual_validation) and self._session._extra_trust_roots
else:
disable_auto_validation = True
explicit_validation = not self._session._manual_validation
if osx_version_info < (10, 8):
for protocol in [, , ]:
protocol_const = _PROTOCOL_STRING_CONST_MAP[protocol]
enabled = protocol in self._session._protocols
result = Security.SSLSetProtocolVersionEnabled(
session_context,
protocol_const,
enabled
)
handle_sec_error(result)
if disable_auto_validation:
result = Security.SSLSetEnableCertVerify(session_context, False)
handle_sec_error(result)
else:
protocol_consts = [_PROTOCOL_STRING_CONST_MAP[protocol] for protocol in self._session._protocols]
min_protocol = min(protocol_consts)
max_protocol = max(protocol_consts)
result = Security.SSLSetProtocolVersionMin(
session_context,
min_protocol
)
handle_sec_error(result)
result = Security.SSLSetProtocolVersionMax(
session_context,
max_protocol
)
handle_sec_error(result)
if disable_auto_validation:
result = Security.SSLSetSessionOption(
session_context,
SecurityConst.kSSLSessionOptionBreakOnServerAuth,
True
)
handle_sec_error(result)
supported_ciphers_pointer = new(Security, )
result = Security.SSLGetNumberSupportedCiphers(session_context, supported_ciphers_pointer)
handle_sec_error(result)
supported_ciphers = deref(supported_ciphers_pointer)
cipher_buffer = buffer_from_bytes(supported_ciphers * 4)
supported_cipher_suites_pointer = cast(Security, , cipher_buffer)
result = Security.SSLGetSupportedCiphers(
session_context,
supported_cipher_suites_pointer,
supported_ciphers_pointer
)
handle_sec_error(result)
supported_ciphers = deref(supported_ciphers_pointer)
supported_cipher_suites = array_from_pointer(
Security,
,
supported_cipher_suites_pointer,
supported_ciphers
)
good_ciphers = []
for supported_cipher_suite in supported_cipher_suites:
cipher_suite = int_to_bytes(supported_cipher_suite, width=2)
cipher_suite_name = CIPHER_SUITE_MAP.get(cipher_suite, cipher_suite)
good_cipher = _cipher_blacklist_regex.search(cipher_suite_name) is None
if good_cipher:
good_ciphers.append(supported_cipher_suite)
num_good_ciphers = len(good_ciphers)
good_ciphers_array = new(Security, , num_good_ciphers)
array_set(good_ciphers_array, good_ciphers)
good_ciphers_pointer = cast(Security, , good_ciphers_array)
result = Security.SSLSetEnabledCiphers(
session_context,
good_ciphers_pointer,
num_good_ciphers
)
handle_sec_error(result)
peer_id = self._session._peer_id + self._hostname.encode()
result = Security.SSLSetPeerID(session_context, peer_id, len(peer_id))
handle_sec_error(result)
handshake_result = Security.SSLHandshake(session_context)
if self._exception is not None:
exception = self._exception
self._exception = None
raise exception
while handshake_result == SecurityConst.errSSLWouldBlock:
handshake_result = Security.SSLHandshake(session_context)
if self._exception is not None:
exception = self._exception
self._exception = None
raise exception
if osx_version_info < (10, 8) and osx_version_info >= (10, 7):
do_validation = explicit_validation and handshake_result == 0
else:
do_validation = explicit_validation and handshake_result == SecurityConst.errSSLServerAuthCompleted
if do_validation:
trust_ref_pointer = new(Security, )
result = Security.SSLCopyPeerTrust(
session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
cf_string_hostname = CFHelpers.cf_string_from_unicode(self._hostname)
ssl_policy_ref = Security.SecPolicyCreateSSL(True, cf_string_hostname)
result = CoreFoundation.CFRelease(cf_string_hostname)
handle_cf_error(result)
ocsp_oid_pointer = struct(Security, )
ocsp_oid = unwrap(ocsp_oid_pointer)
ocsp_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_OCSP)
ocsp_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_OCSP)
ocsp_oid.Data = cast(Security, , ocsp_oid_buffer)
ocsp_search_ref_pointer = new(Security, )
result = Security.SecPolicySearchCreate(
SecurityConst.CSSM_CERT_X_509v3,
ocsp_oid_pointer,
null(),
ocsp_search_ref_pointer
)
handle_sec_error(result)
ocsp_search_ref = unwrap(ocsp_search_ref_pointer)
ocsp_policy_ref_pointer = new(Security, )
result = Security.SecPolicySearchCopyNext(ocsp_search_ref, ocsp_policy_ref_pointer)
handle_sec_error(result)
ocsp_policy_ref = unwrap(ocsp_policy_ref_pointer)
ocsp_struct_pointer = struct(Security, )
ocsp_struct = unwrap(ocsp_struct_pointer)
ocsp_struct.Version = SecurityConst.CSSM_APPLE_TP_OCSP_OPTS_VERSION
ocsp_struct.Flags = (
SecurityConst.CSSM_TP_ACTION_OCSP_DISABLE_NET |
SecurityConst.CSSM_TP_ACTION_OCSP_CACHE_READ_DISABLE
)
ocsp_struct_bytes = struct_bytes(ocsp_struct_pointer)
cssm_data_pointer = struct(Security, )
cssm_data = unwrap(cssm_data_pointer)
cssm_data.Length = len(ocsp_struct_bytes)
ocsp_struct_buffer = buffer_from_bytes(ocsp_struct_bytes)
cssm_data.Data = cast(Security, , ocsp_struct_buffer)
result = Security.SecPolicySetValue(ocsp_policy_ref, cssm_data_pointer)
handle_sec_error(result)
crl_oid_pointer = struct(Security, )
crl_oid = unwrap(crl_oid_pointer)
crl_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_CRL)
crl_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_CRL)
crl_oid.Data = cast(Security, , crl_oid_buffer)
crl_search_ref_pointer = new(Security, )
result = Security.SecPolicySearchCreate(
SecurityConst.CSSM_CERT_X_509v3,
crl_oid_pointer,
null(),
crl_search_ref_pointer
)
handle_sec_error(result)
crl_search_ref = unwrap(crl_search_ref_pointer)
crl_policy_ref_pointer = new(Security, )
result = Security.SecPolicySearchCopyNext(crl_search_ref, crl_policy_ref_pointer)
handle_sec_error(result)
crl_policy_ref = unwrap(crl_policy_ref_pointer)
crl_struct_pointer = struct(Security, )
crl_struct = unwrap(crl_struct_pointer)
crl_struct.Version = SecurityConst.CSSM_APPLE_TP_CRL_OPTS_VERSION
crl_struct.CrlFlags = 0
crl_struct_bytes = struct_bytes(crl_struct_pointer)
cssm_data_pointer = struct(Security, )
cssm_data = unwrap(cssm_data_pointer)
cssm_data.Length = len(crl_struct_bytes)
crl_struct_buffer = buffer_from_bytes(crl_struct_bytes)
cssm_data.Data = cast(Security, , crl_struct_buffer)
result = Security.SecPolicySetValue(crl_policy_ref, cssm_data_pointer)
handle_sec_error(result)
policy_array_ref = CFHelpers.cf_array_from_list([
ssl_policy_ref,
crl_policy_ref,
ocsp_policy_ref
])
result = Security.SecTrustSetPolicies(trust_ref, policy_array_ref)
handle_sec_error(result)
if self._session._extra_trust_roots:
ca_cert_refs = []
ca_certs = []
for cert in self._session._extra_trust_roots:
ca_cert = load_certificate(cert)
ca_certs.append(ca_cert)
ca_cert_refs.append(ca_cert.sec_certificate_ref)
result = Security.SecTrustSetAnchorCertificatesOnly(trust_ref, False)
handle_sec_error(result)
array_ref = CFHelpers.cf_array_from_list(ca_cert_refs)
result = Security.SecTrustSetAnchorCertificates(trust_ref, array_ref)
handle_sec_error(result)
result_pointer = new(Security, )
result = Security.SecTrustEvaluate(trust_ref, result_pointer)
handle_sec_error(result)
trust_result_code = deref(result_pointer)
invalid_chain_error_codes = set([
SecurityConst.kSecTrustResultProceed,
SecurityConst.kSecTrustResultUnspecified
])
if trust_result_code not in invalid_chain_error_codes:
handshake_result = SecurityConst.errSSLXCertChainInvalid
else:
handshake_result = Security.SSLHandshake(session_context)
while handshake_result == SecurityConst.errSSLWouldBlock:
handshake_result = Security.SSLHandshake(session_context)
self._done_handshake = True
handshake_error_codes = set([
SecurityConst.errSSLXCertChainInvalid,
SecurityConst.errSSLCertExpired,
SecurityConst.errSSLCertNotYetValid,
SecurityConst.errSSLUnknownRootCert,
SecurityConst.errSSLNoRootCert,
SecurityConst.errSSLHostNameMismatch,
SecurityConst.errSSLInternal,
])
if handshake_result in handshake_error_codes:
trust_ref_pointer = new(Security, )
result = Security.SSLCopyPeerTrust(
session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
result_code_pointer = new(Security, )
result = Security.SecTrustGetCssmResultCode(trust_ref, result_code_pointer)
result_code = deref(result_code_pointer)
chain = extract_chain(self._server_hello)
self_signed = False
revoked = False
expired = False
not_yet_valid = False
no_issuer = False
cert = None
bad_hostname = False
if chain:
cert = chain[0]
oscrypto_cert = load_certificate(cert)
self_signed = oscrypto_cert.self_signed
revoked = result_code == SecurityConst.CSSMERR_TP_CERT_REVOKED
no_issuer = not self_signed and result_code == SecurityConst.CSSMERR_TP_NOT_TRUSTED
expired = result_code == SecurityConst.CSSMERR_TP_CERT_EXPIRED
not_yet_valid = result_code == SecurityConst.CSSMERR_TP_CERT_NOT_VALID_YET
bad_hostname = result_code == SecurityConst.CSSMERR_APPLETP_HOSTNAME_MISMATCH
if osx_version_info >= (10, 12):
validity = cert[][]
not_before = validity[].chosen.native
not_after = validity[].chosen.native
utcnow = datetime.datetime.now(timezone.utc)
expired = not_after < utcnow
not_yet_valid = not_before > utcnow
if chain and chain[0].hash_algo in set([, ]):
raise_weak_signature(chain[0])
if revoked:
raise_revoked(cert)
if bad_hostname:
raise_hostname(cert, self._hostname)
elif expired or not_yet_valid:
raise_expired_not_yet_valid(cert)
elif no_issuer:
raise_no_issuer(cert)
elif self_signed:
raise_self_signed(cert)
if detect_client_auth_request(self._server_hello):
raise_client_auth()
raise_verification(cert)
if handshake_result == SecurityConst.errSSLPeerHandshakeFail:
if detect_client_auth_request(self._server_hello):
raise_client_auth()
raise_handshake()
if handshake_result == SecurityConst.errSSLWeakPeerEphemeralDHKey:
raise_dh_params()
if handshake_result == SecurityConst.errSSLPeerProtocolVersion:
raise_protocol_version()
if handshake_result in set([SecurityConst.errSSLRecordOverflow, SecurityConst.errSSLProtocol]):
self._server_hello += _read_remaining(self._socket)
raise_protocol_error(self._server_hello)
if handshake_result in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):
if not self._done_handshake:
self._server_hello += _read_remaining(self._socket)
if detect_other_protocol(self._server_hello):
raise_protocol_error(self._server_hello)
raise_disconnection()
if osx_version_info < (10, 10):
dh_params_length = get_dh_params_length(self._server_hello)
if dh_params_length is not None and dh_params_length < 1024:
raise_dh_params()
would_block = handshake_result == SecurityConst.errSSLWouldBlock
server_auth_complete = handshake_result == SecurityConst.errSSLServerAuthCompleted
manual_validation = self._session._manual_validation and server_auth_complete
if not would_block and not manual_validation:
handle_sec_error(handshake_result, TLSError)
self._session_context = session_context
protocol_const_pointer = new(Security, )
result = Security.SSLGetNegotiatedProtocolVersion(
session_context,
protocol_const_pointer
)
handle_sec_error(result)
protocol_const = deref(protocol_const_pointer)
self._protocol = _PROTOCOL_CONST_STRING_MAP[protocol_const]
cipher_int_pointer = new(Security, )
result = Security.SSLGetNegotiatedCipher(
session_context,
cipher_int_pointer
)
handle_sec_error(result)
cipher_int = deref(cipher_int_pointer)
cipher_bytes = int_to_bytes(cipher_int, width=2)
self._cipher_suite = CIPHER_SUITE_MAP.get(cipher_bytes, cipher_bytes)
session_info = parse_session_info(
self._server_hello,
self._client_hello
)
self._compression = session_info[]
self._session_id = session_info[]
self._session_ticket = session_info[]
except (OSError, socket_.error):
if session_context:
if osx_version_info < (10, 8):
result = Security.SSLDisposeContext(session_context)
handle_sec_error(result)
else:
result = CoreFoundation.CFRelease(session_context)
handle_cf_error(result)
self._session_context = None
self.close()
raise
finally:
if ssl_policy_ref:
result = CoreFoundation.CFRelease(ssl_policy_ref)
handle_cf_error(result)
ssl_policy_ref = None
if crl_policy_ref:
result = CoreFoundation.CFRelease(crl_policy_ref)
handle_cf_error(result)
crl_policy_ref = None
if ocsp_policy_ref:
result = CoreFoundation.CFRelease(ocsp_policy_ref)
handle_cf_error(result)
ocsp_policy_ref = None
if policy_array_ref:
result = CoreFoundation.CFRelease(policy_array_ref)
handle_cf_error(result)
policy_array_ref = None
|
Perform an initial TLS handshake
|
377,156
|
def reorderChild(self, parent, newitem):
    """Reorder a child list to match the target by moving one contiguous
    sequence at a time.

    Written for QAbstractItemModel.moveRows: ``source`` (the current
    children of *parent*) is transformed in place to match ``target``
    (the children of *newitem*) using self.moveRows calls, which keeps
    view selections/expansions consistent.
    """
    source = self.getItem(parent).childItems
    target = newitem.childItems
    i = 0
    while i < len(source):
        if source[i] == target[i]:
            # Prefix already matches; advance.
            i += 1
            continue
        else:
            # Mismatch at i: find where the wanted item currently sits (j0),
            # then grow the run [j0, j) of items that are already in target
            # order relative to position i.
            i0 = i
            j0 = source.index(target[i0])
            j = j0 + 1
            while j < len(source):
                if source[j] == target[j - j0 + i0]:
                    j += 1
                    continue
                else:
                    break
            # Move the whole run of length (j - j0) from j0 up to i0.
            self.moveRows(parent, i0, j0, j - j0)
            i += j - j0
|
Reorder a list to match target by moving a sequence at a time.
Written for QtAbstractItemModel.moveRows.
|
377,157
|
def set_duplicated_flag(self):
    """Flag packages as duplicated when two packages share a name but
    differ in any of the configured comparison parameters.

    :return:
    """
    package_by_name = defaultdict(list)
    for package1 in self._root_package.all_packages:
        if package1 is None:
            continue
        pkg_name = package1.package_name
        # NOTE(review): the get_fails() key literal was stripped in this
        # copy; it names the config entry listing parameters to compare.
        param_list = self._config.get_fails(, {})
        params1 = package1.get_params(param_list)
        # Compare against every previously seen package with the same name.
        for package2 in package_by_name[pkg_name]:
            params2 = package2.get_params(param_list)
            for x in param_list:
                param1 = params1[x]
                param2 = params2[x]
                # Normalize lists element-wise so str() comparison is stable.
                if isinstance(param1, list):
                    param1 = [str(x) for x in param1]
                if isinstance(param2, list):
                    param2 = [str(x) for x in param2]
                if str(param1) != str(param2):
                    package1.duplicated = True
                    package2.duplicated = True
        package_by_name[pkg_name].append(package1)
|
For each package, set the duplicated flag if the package is not unique
:return:
|
377,158
|
def theme_color(self):
    """A member of MsoThemeColorIndex, or None if no theme color is
    specified.

    Returns None when there is no underlying color or the color has no
    theme-color member; otherwise the theme-color member itself.
    """
    if self._color is None:
        return None
    if self._color.themeColor is None:
        return None
    return self._color.themeColor
|
A member of :ref:`MsoThemeColorIndex` or |None| if no theme color is
specified. When :attr:`type` is `MSO_COLOR_TYPE.THEME`, the value of
this property will always be a member of :ref:`MsoThemeColorIndex`.
When :attr:`type` has any other value, the value of this property is
|None|.
Assigning a member of :ref:`MsoThemeColorIndex` causes :attr:`type`
to become `MSO_COLOR_TYPE.THEME`. Any existing RGB value is retained
but ignored by Word. Assigning |None| causes any color specification
to be removed such that the effective color is inherited from the
style hierarchy.
|
377,159
|
def add_program(self, name=None):
    """Create a program and add it to this MultiProgram.

    It is the caller's responsibility to keep a reference to the returned
    program. The *name* must be unique, but is otherwise arbitrary and
    used for debugging purposes.

    :param name: unique program name; auto-generated when None
    :raises KeyError: if a program with *name* already exists
    """
    if name is None:
        # NOTE(review): the name-prefix literal was stripped in this copy;
        # 'program' is a reconstruction -- confirm against upstream.
        name = 'program' + str(self._next_prog_id)
        self._next_prog_id += 1
    if name in self._programs:
        # Bug fix: the original message had no %s placeholder, so the
        # %-format raised TypeError instead of naming the program.
        raise KeyError("Program named '%s' already exists." % name)
    prog = ModularProgram(self._vcode, self._fcode)
    # The new program inherits all currently-set shader variables.
    for key, val in self._set_items.items():
        prog[key] = val
    self.frag._new_program(prog)
    self.vert._new_program(prog)
    self._programs[name] = prog
    return prog
|
Create a program and add it to this MultiProgram.
It is the caller's responsibility to keep a reference to the returned
program.
The *name* must be unique, but is otherwise arbitrary and used for
debugging purposes.
|
377,160
|
def _coerce_dtype(self, other_dtype):
if self._dtype is None:
new_dtype = np.dtype(other_dtype)
else:
new_dtype = np.find_common_type([self._dtype, np.dtype(other_dtype)], [])
if new_dtype != self.dtype:
self.set_dtype(new_dtype)
|
Possibly change the bin content type to allow correct operations with other operand.
Parameters
----------
other_dtype : np.dtype or type
|
377,161
|
def check_overlap(self, other, wavelengths=None, threshold=0.01):
    """Check for wavelength overlap between two spectra.

    Only wavelengths where ``self`` throughput is non-zero are considered.
    Returns one of 'full', 'partial_most', 'partial_notmost', 'none'.

    NOTE(review): several string literals were stripped in this copy
    (the SynphotError message and the comparison below); the partial-overlap
    / *threshold* handling that the docstring describes also appears to be
    truncated -- recover from upstream.
    """
    if not isinstance(other, BaseSpectrum):
        raise exceptions.SynphotError(
            )
    if wavelengths is None:
        # With no explicit wavelengths, either spectrum lacking a waveset
        # means overlap cannot be assessed; bail out early.
        if other.waveset is None:
            return
        if self.waveset is None:
            return
    x1 = self._validate_wavelengths(wavelengths)
    y1 = self(x1)
    # Only wavelengths with non-zero throughput in self are considered.
    a = x1[y1 > 0].value
    b = other._validate_wavelengths(wavelengths).value
    result = utils.overlap_status(a, b)
    if result == :
        return result
|
Check for wavelength overlap between two spectra.
Only wavelengths where ``self`` throughput is non-zero
are considered.
Example of full overlap::
|---------- other ----------|
|------ self ------|
Examples of partial overlap::
|---------- self ----------|
|------ other ------|
|---- other ----|
|---- self ----|
|---- self ----|
|---- other ----|
Examples of no overlap::
|---- self ----| |---- other ----|
|---- other ----| |---- self ----|
Parameters
----------
other : `BaseSpectrum`
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for integration.
If not a Quantity, assumed to be in Angstrom.
If `None`, `waveset` is used.
threshold : float
If less than this fraction of flux or throughput falls
outside wavelength overlap, the *lack* of overlap is
*insignificant*. This is only used when partial overlap
is detected. Default is 1%.
Returns
-------
result : {'full', 'partial_most', 'partial_notmost', 'none'}
* 'full' - ``self`` coverage is within or same as ``other``
* 'partial_most' - Less than ``threshold`` fraction of
``self`` flux is outside the overlapping wavelength
region, i.e., the *lack* of overlap is *insignificant*
* 'partial_notmost' - ``self`` partially overlaps with
``other`` but does not qualify for 'partial_most'
* 'none' - ``self`` does not overlap ``other``
Raises
------
synphot.exceptions.SynphotError
Invalid inputs.
|
377,162
|
def pyeapi_config(commands=None,
                  config_file=None,
                  template_engine=,
                  context=None,
                  defaults=None,
                  saltenv=,
                  **kwargs):
    """Configure the Arista switch with the specified commands via pyeapi.

    Forwards the existing connection details to the arista_pyeapi config
    execution function. ``commands`` is ignored when ``config_file`` is
    given; ``config_file`` may be a template rendered with
    ``template_engine`` using ``context``/``defaults``.

    NOTE(review): the defaults for template_engine (presumably 'jinja')
    and saltenv (presumably 'base') were stripped in this copy, and the
    next line is docstring/example residue from extraction.
    """
    *ntp server 1.2.3.4
    # Translate generic proxy kwargs into pyeapi connection kwargs.
    pyeapi_kwargs = pyeapi_nxos_api_args(**kwargs)
    # NOTE(review): the __salt__ function-name literal was stripped here.
    return __salt__[](commands=commands,
                      config_file=config_file,
                      template_engine=template_engine,
                      context=context,
                      defaults=defaults,
                      saltenv=saltenv,
                      **pyeapi_kwargs)
|
.. versionadded:: 2019.2.0
Configures the Arista switch with the specified commands, via the ``pyeapi``
library. This function forwards the existing connection details to the
:mod:`pyeapi.run_commands <salt.module.arista_pyeapi.run_commands>`
execution function.
commands
The list of configuration commands to load on the Arista switch.
.. note::
This argument is ignored when ``config_file`` is specified.
config_file
The source file with the configuration commands to be sent to the device.
The file can also be a template that can be rendered using the template
engine of choice. This can be specified using the absolute path to the
file, or using one of the following URL schemes:
- ``salt://``
- ``https://``
- ``ftp:/``
- ``s3:/``
- ``swift://``
template_engine: ``jinja``
The template engine to use when rendering the source file. Default:
``jinja``. To simply fetch the file without attempting to render, set
this argument to ``None``.
context: ``None``
Variables to add to the template context.
defaults: ``None``
Default values of the ``context`` dict.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. Ignored if
``config_file`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' napalm.pyeapi_config 'ntp server 1.2.3.4'
|
377,163
|
def show_xticklabels(self, row, column):
    """Show the x-axis tick labels for a subplot.

    :param row,column: specify the subplot.
    """
    self.get_subplot_at(row, column).show_xticklabels()
|
Show the x-axis tick labels for a subplot.
:param row,column: specify the subplot.
|
377,164
|
def env_string(name, required=False, default=empty):
    """Pull an environment variable out of the environment as a string.

    If not present in the environment and no default is specified, an
    empty string is returned.

    :param name: The name of the environment variable to be pulled
    :type name: str
    :param required: Whether the environment variable is required. If
        ``True`` and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside ``required=True`` raises
        a ``ValueError``.)
    """
    value = get_env_value(name, default=default, required=required)
    if value is empty:
        # Documented behavior: a missing variable with no default yields
        # '' (the literal was stripped in this copy and reconstructed).
        value = ''
    return value
|
Pulls an environment variable out of the environment returning it as a
string. If not present in the environment and no default is specified, an
empty string is returned.
:param name: The name of the environment variable be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool
|
377,165
|
def load_spectrum(path, smoothing=181, DF=-8.):
    """Load a Phoenix model atmosphere spectrum.

    path : string
        The file path to load.
    smoothing : integer or array
        If an integer, smooth with a Hamming window of that length; if an
        array, convolve with it; if None, do not smooth.
    DF : float
        Numerical factor used to compute the emergent flux density:
        ``flam = 10**(col2 + DF)``.

    Returns a Pandas DataFrame with wavelength (Angstrom) and flux density
    (erg/cm^2/s/A) columns.

    NOTE(review): several string literals were stripped in this copy (the
    open() mode, the bytes replace() arguments, the convolve() modes, and
    the DataFrame column names -- presumably 'wlen' and 'flam' per the
    docstring); recover from upstream.
    """
    try:
        ang, lflam = np.loadtxt(path, usecols=(0,1)).T
    except ValueError:
        # Fallback for fixed-width files where columns can run together:
        # re-read with explicit 13/12-character fields.
        with open(path, ) as f:
            def lines():
                for line in f:
                    yield line.replace(b, b)
            ang, lflam = np.genfromtxt(lines(), delimiter=(13, 12)).T
    # Sort by wavelength and convert log flux to linear flux density.
    z = ang.argsort()
    ang = ang[z]
    flam = 10**(lflam[z] + DF)
    del z
    if smoothing is not None:
        if isinstance(smoothing, int):
            smoothing = np.hamming(smoothing)
        else:
            smoothing = np.asarray(smoothing)
        # Normalize the window so smoothing preserves overall flux level,
        # then decimate by the window length.
        wnorm = np.convolve(np.ones_like(smoothing), smoothing, mode=)
        smoothing = smoothing / wnorm
        smooth = lambda a: np.convolve(a, smoothing, mode=)[::smoothing.size]
        ang = smooth(ang)
        flam = smooth(flam)
    return pd.DataFrame({: ang, : flam})
|
Load a Phoenix model atmosphere spectrum.
path : string
The file path to load.
smoothing : integer
Smoothing to apply. If None, do not smooth. If an integer, smooth with a
Hamming window. Otherwise, the variable is assumed to be a different
smoothing window, and the data will be convolved with it.
DF: float
Numerical factor used to compute the emergent flux density.
Returns a Pandas DataFrame containing the columns:
wlen
Sample wavelength in Angstrom.
flam
Flux density in erg/cm²/s/Å. See `pwkit.synphot` for related tools.
The values of *flam* returned by this function are computed from the
second column of the data file as specified in the documentation: ``flam =
10**(col2 + DF)``. The documentation states that the default value, -8, is
appropriate for most modern models; but some older models use other
values.
Loading takes about 5 seconds on my current laptop. Un-smoothed spectra
have about 630,000 samples.
|
377,166
|
def get_bool(_bytearray, byte_index, bool_index):
    """Get the boolean value of one bit at a location in the bytearray.

    :param _bytearray: buffer of bytes
    :param byte_index: which byte to inspect
    :param bool_index: which bit (0 = least significant) within that byte
    :returns: True when the bit is set
    """
    mask = 1 << bool_index
    return (_bytearray[byte_index] & mask) == mask
|
Get the boolean value from location in bytearray
|
377,167
|
def format_status(self, width=None,
                  label_width=None,
                  progress_width=None,
                  summary_width=None):
    """Generate the formatted status bar string.

    Layout is ``<label> <progress> <summary>``. Any width left as None is
    derived: full width from the terminal, label width from the label,
    summary width from self.summary_width(), and progress width from what
    remains (minus the two separating spaces).
    """
    if width is None:
        width = shutil.get_terminal_size()[0]
    if label_width is None:
        label_width = len(self.label)
    if summary_width is None:
        summary_width = self.summary_width()
    if progress_width is None:
        # The two subtracted columns are the spaces between the fields.
        progress_width = width - label_width - summary_width - 2
    if len(self.label) > label_width:
        # Truncate with an ellipsis; NOTE(review): assumes label_width >= 3.
        label = self.label[:label_width - 3] + "..."
    else:
        # Left-align and pad the label with the configured fill character.
        label_format = "{{label:{fill_char}<{width}}}".format(
            width=label_width,
            fill_char=self.fill_char)
        label = label_format.format(label=self.label)
    # Right-align the summary in its column.
    summary_format = "{{:>{width}}}".format(width=summary_width)
    summary = summary_format.format(self._progress.format_summary())
    progress = self._progress.format_progress(width=progress_width)
    return "{label} {progress} {summary}".format(
        label=label,
        progress=progress,
        summary=summary
    )
|
Generate the formatted status bar string.
|
377,168
|
def split_address(address):
    """Return (host, port) with an integer port from the specified address
    string; (None, None) is returned if the address is invalid.
    """
    invalid = None, None
    # 0 is a valid (falsy) address; only reject empty/None.
    if not address and address != 0:
        return invalid
    # NOTE(review): the split() separator literal appears to have been
    # stripped in this copy -- presumably ':' for host:port; confirm.
    components = str(address).split()
    if len(components) > 2:
        return invalid
    if components[0] and not valid_hostname(components[0]):
        return invalid
    if len(components) == 2 and not valid_port(components[1]):
        return invalid
    if len(components) == 1:
        # A lone component that looks like a port becomes the port;
        # otherwise it is treated as the host.
        components.insert(0 if valid_port(components[0]) else 1, None)
    host, port = components
    port = int(port) if port else None
    return host, port
|
Returns (host, port) with an integer port from the specified address
string. (None, None) is returned if the address is invalid.
|
377,169
|
def send_terrain_data(self):
    """Send the next outstanding piece of terrain data.

    Sends at most one bit per call: the lowest bit that is requested in
    the current request's mask but not yet recorded in sent_mask. When
    every requested bit has been sent, the request is cleared and the
    sent mask reset.
    """
    for bit in range(56):
        requested = self.current_request.mask & (1 << bit)
        # Bug fix: '==' binds tighter than '&' in Python, so the original
        # 'self.sent_mask & (1<<bit) == 0' evaluated as
        # 'sent_mask & ((1<<bit) == 0)', which is always 0 (falsy) --
        # no terrain data was ever sent.
        already_sent = self.sent_mask & (1 << bit)
        if requested and not already_sent:
            self.send_terrain_data_bit(bit)
            return
    # All requested bits have been sent; reset for the next request.
    self.current_request = None
    self.sent_mask = 0
|
send some terrain data
|
377,170
|
def capture_termination_signal(please_stop):
    """Signal *please_stop* when this AWS instance is due for shutdown.

    Starts a background thread that polls the EC2 instance metadata
    endpoint for a spot-termination notice roughly once a minute.
    """
    def worker(please_stop):
        seen_problem = False
        while not please_stop:
            # Minutes since process start, for diagnostics in the warning.
            request_time = (time.time() - timer.START)/60
            try:
                # EC2 metadata endpoint: 200 here means termination is
                # scheduled; 404 means no notice yet.
                response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
                seen_problem = False
                if response.status_code not in [400, 404]:
                    Log.alert("Shutdown AWS Spot Node {{name}} {{type}}", name=machine_metadata.name, type=machine_metadata.aws_instance_type)
                    please_stop.go()
            except Exception as e:
                e = Except.wrap(e)
                if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e:
                    # Not on EC2 at all; stop polling entirely.
                    Log.note("AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)")
                    return
                elif seen_problem:
                    # Only warn on the second consecutive failure.
                    Log.warning("AWS shutdown detection has more than one consecutive problem: (last request {{time|round(1)}} minutes since startup)", time=request_time, cause=e)
                seen_problem = True
                # Back off longer after a failure.
                (Till(seconds=61) | please_stop).wait()
            (Till(seconds=11) | please_stop).wait()
    Thread.run("listen for termination", worker)
|
WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN
|
377,171
|
def MACRO_DEFINITION(self, cursor):
    """Parse a MACRO_DEFINITION cursor and register a ``typedesc.Macro``.

    Only present when the TranslationUnit was created with
    ``TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD``.

    NOTE(review): several string literals in this block were lost during
    extraction (the empty argument positions below); restore them from
    the original source before executing.
    """
    # skip cursors without a usable source location
    if (not hasattr(cursor, ) or cursor.location is None or
            cursor.location.file is None):
        return False
    name = self.get_unique_name(cursor)
    comment = None
    tokens = self._literal_handling(cursor)
    # tokens[0] is the macro name; the remainder (if any) is its value
    value = True
    if isinstance(tokens, list):
        if len(tokens) == 2:
            value = tokens[1]
        else:
            value = .join(tokens[1:])
    # keep any comment token attached to the definition
    for t in cursor.get_tokens():
        if t.kind == TokenKind.COMMENT:
            comment = t.spelling
    if name == or value == :
        value = None
    log.debug(, tokens[0], value)
    obj = typedesc.Macro(name, None, value)
    try:
        self.register(name, obj)
    except DuplicateDefinitionException:
        # macro redefinition: keep the newest definition
        log.info(
            ,
            name, self.parser.all[name].args, value)
        self.parser.all[name] = obj
    self.set_location(obj, cursor)
    obj.comment = comment
    return True
|
Parse MACRO_DEFINITION, only present if the TranslationUnit is
used with TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD.
|
377,172
|
def get_persistent_boot_device(self):
    """Get the current persistent boot device set for the host.

    :returns: persistent boot device for the system, or None.
    :raises: IloError, on an error from iLO.
    """
    sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
    # a "continuous" boot-source override is itself the persistent device
    if ((sushy_system.
         boot.enabled) == sushy.BOOT_SOURCE_ENABLED_CONTINUOUS):
        return PERSISTENT_BOOT_MAP.get(sushy_system.boot.target)
    # persistent boot via BIOS settings is only supported in UEFI mode
    if not self._is_boot_mode_uefi():
        return None
    try:
        boot_device = (sushy_system.bios_settings.boot_settings.
                       get_persistent_boot_device())
        return PERSISTENT_BOOT_MAP.get(boot_device)
    except sushy.exceptions.SushyError as e:
        # NOTE(review): the dict key literal (presumably 'error') was
        # lost in extraction
        msg = (self._("The Redfish controller is unable to get "
                      "persistent boot device. Error %(error)s") %
               {: str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
|
Get current persistent boot device set for the host
:returns: persistent boot device for the system
:raises: IloError, on an error from iLO.
|
377,173
|
def libvlc_video_get_adjust_float(p_mi, option):
    """Get a float adjust option from a media player.

    @param p_mi: libvlc media player instance.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @version: LibVLC 1.1.1 and later.

    NOTE(review): the ctypes function-name literals were lost in
    extraction (the empty argument positions below).
    """
    # reuse the cached ctypes binding, creating it on first use
    f = _Cfunctions.get(, None) or \
        _Cfunction(, ((1,), (1,),), None,
                   ctypes.c_float, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
|
Get float adjust option.
@param p_mi: libvlc media player instance.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
|
377,174
|
def port_profile_domain_profile_profile_name(self, **kwargs):
    """Auto-generated NETCONF binding.

    Builds the XML config tree for
    port-profile-domain/profile/profile-name and passes it to the
    callback (``self._callback`` unless overridden via kwargs).

    NOTE(review): the ``kwargs.pop`` key literals were lost in
    extraction.
    """
    config = ET.Element("config")
    port_profile_domain = ET.SubElement(config, "port-profile-domain", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    port_profile_domain_name_key = ET.SubElement(port_profile_domain, "port-profile-domain-name")
    port_profile_domain_name_key.text = kwargs.pop()
    profile = ET.SubElement(port_profile_domain, "profile")
    profile_name = ET.SubElement(profile, "profile-name")
    profile_name.text = kwargs.pop()
    callback = kwargs.pop(, self._callback)
    return callback(config)
|
Auto Generated Code
|
377,175
|
def empirical_sinkhorn_divergence(X_s, X_t, reg, a=None, b=None, metric=, numIterMax=10000, stopThr=1e-9, verbose=False, log=False, **kwargs):
    """Compute the Sinkhorn divergence between empirical distributions.

    S = W(X_s, X_t) - (W(X_s, X_s) + W(X_t, X_t)) / 2, where W is the
    entropy-regularized OT cost returned by ``empirical_sinkhorn2``.

    :param X_s: source samples, shape (ns, d)
    :param X_t: target samples, shape (nt, d)
    :param reg: regularization term > 0
    :param a, b: optional sample weights
    :returns: the divergence clipped to be non-negative, plus a log dict
        when ``log=True``.

    NOTE(review): the metric default and the log-dict key literals were
    lost in extraction (empty positions below).
    """
    if log:
        sinkhorn_loss_ab, log_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
        sinkhorn_loss_a, log_a = empirical_sinkhorn2(X_s, X_s, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
        sinkhorn_loss_b, log_b = empirical_sinkhorn2(X_t, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
        # cross term minus the average of the two self terms
        sinkhorn_div = sinkhorn_loss_ab - 1 / 2 * (sinkhorn_loss_a + sinkhorn_loss_b)
        log = {}
        log[] = sinkhorn_loss_ab
        log[] = sinkhorn_loss_a
        log[] = sinkhorn_loss_b
        log[] = log_ab
        log[] = log_a
        log[] = log_b
        # clip tiny negative values caused by numerical error
        return max(0, sinkhorn_div), log
    else:
        sinkhorn_loss_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
        sinkhorn_loss_a = empirical_sinkhorn2(X_s, X_s, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
        sinkhorn_loss_b = empirical_sinkhorn2(X_t, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
        sinkhorn_div = sinkhorn_loss_ab - 1 / 2 * (sinkhorn_loss_a + sinkhorn_loss_b)
        # clip tiny negative values caused by numerical error
        return max(0, sinkhorn_div)
|
Compute the sinkhorn divergence loss from empirical data
The function solves the following optimization problems and return the
sinkhorn divergence :math:`S`:
.. math::
W &= \min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
W_a &= \min_{\gamma_a} <\gamma_a,M_a>_F + reg\cdot\Omega(\gamma_a)
W_b &= \min_{\gamma_b} <\gamma_b,M_b>_F + reg\cdot\Omega(\gamma_b)
S &= W - 1/2 * (W_a + W_b)
.. math::
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
\gamma_a 1 = a
\gamma_a^T 1= a
\gamma_a\geq 0
\gamma_b 1 = b
\gamma_b^T 1= b
\gamma_b\geq 0
where :
- :math:`M` (resp. :math:`M_a, M_b`) is the (ns,nt) metric cost matrix (resp (ns, ns) and (nt, nt))
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- :math:`a` and :math:`b` are source and target weights (sum to 1)
Parameters
----------
X_s : np.ndarray (ns, d)
samples in the source domain
X_t : np.ndarray (nt, d)
samples in the target domain
reg : float
Regularization term >0
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,)
samples weights in the target domain
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (ns x nt) ndarray
Regularized optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> n_s = 2
>>> n_t = 4
>>> reg = 0.1
>>> X_s = np.reshape(np.arange(n_s), (n_s, 1))
>>> X_t = np.reshape(np.arange(0, n_t), (n_t, 1))
>>> emp_sinkhorn_div = empirical_sinkhorn_divergence(X_s, X_t, reg)
>>> print(emp_sinkhorn_div)
>>> [2.99977435]
References
----------
.. [23] Aude Genevay, Gabriel Peyré, Marco Cuturi, Learning Generative Models with Sinkhorn Divergences, Proceedings of the Twenty-First International Conference on Artficial Intelligence and Statistics, (AISTATS) 21, 2018
|
377,176
|
def set_attribute(self, key, value):
    """Add or update the value of an attribute.

    Integer keys address positional children; string keys address named
    attributes (``basestring`` implies this module targets Python 2).
    Any other key type is rejected.
    """
    if isinstance(key, int):
        # positional: replace the child at this index
        self.children[key] = value
        return
    if isinstance(key, basestring):
        # named attribute
        self.attributes[key] = value
        return
    raise TypeError(
        )
|
Add or update the value of an attribute.
|
377,177
|
def get_voltage(self, channel):
    """Query the output voltage of the given channel (1=OP1, 2=OP2).

    AUX is not supported.  Returns the voltage in volts, or None if the
    instrument reply is not of the expected ``<number>V`` form.
    """
    reply = self.ask("V%dO?" % channel)
    if reply[-1] == "V":
        return float(reply[:-1])
    # unexpected reply format: report it and signal failure
    print("ttiQl355tp.get_voltage() format error", reply)
    return None
|
channel: 1=OP1, 2=OP2, AUX is not supported
|
377,178
|
def dialog_mode(self, dialog_mode):
    """Switch the speaker's dialog mode on or off.

    :param dialog_mode: Enable or disable dialog mode
    :type dialog_mode: bool
    :raises NotSupportedException: if the device is not a soundbar.

    NOTE(review): the error-message and SetEQ name literals were lost in
    extraction (empty positions below).
    """
    if not self.is_soundbar:
        message =
        raise NotSupportedException(message)
    # SetEQ takes (name, value) pairs; the lost first elements were
    # presumably 'InstanceID', 'EQType' and 'DesiredValue' — confirm
    self.renderingControl.SetEQ([
        (, 0),
        (, ),
        (, int(dialog_mode))
    ])
|
Switch on/off the speaker's dialog mode.
:param dialog_mode: Enable or disable dialog mode
:type dialog_mode: bool
:raises NotSupportedException: If the device does not support
dialog mode.
|
377,179
|
def n_point_crossover(random, mom, dad, args):
    """Return the offspring of n-point crossover on two parent candidates.

    Selects up to *num_crossover_points* random cut positions and
    recombines the alternating segments of the parents into two
    children.  With probability ``1 - crossover_rate`` the parents are
    returned unchanged.

    Optional args: crossover_rate (default 1.0),
    num_crossover_points (default 1).

    NOTE(review): the ``args.setdefault`` key literals were lost in
    extraction.
    """
    crossover_rate = args.setdefault(, 1.0)
    num_crossover_points = args.setdefault(, 1)
    children = []
    if random.random() < crossover_rate:
        # cannot cut more times than there are gaps between genes
        num_cuts = min(len(mom)-1, num_crossover_points)
        cut_points = random.sample(range(1, len(mom)), num_cuts)
        cut_points.sort()
        bro = copy.copy(dad)
        sis = copy.copy(mom)
        # 'normal' flips at every cut point to alternate which parent
        # contributes the current segment
        normal = True
        for i, (m, d) in enumerate(zip(mom, dad)):
            if i in cut_points:
                normal = not normal
            if not normal:
                bro[i] = m
                sis[i] = d
            # NOTE(review): this unconditional per-element toggle looks
            # suspicious for n-point crossover; the indentation of this
            # line was ambiguous in this copy — confirm against upstream.
            normal = not normal
        children.append(bro)
        children.append(sis)
    else:
        # no crossover performed: children are the parents themselves
        children.append(mom)
        children.append(dad)
    return children
|
Return the offspring of n-point crossover on the candidates.
This function performs n-point crossover (NPX). It selects *n*
random points without replacement at which to 'cut' the candidate
solutions and recombine them.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *num_crossover_points* -- the number of crossover points used (default 1)
|
377,180
|
def _get_function_transitions(self,
                              expression: Union[str, List],
                              expected_type: PredicateType) -> Tuple[List[str],
                                                                     PredicateType,
                                                                     List[PredicateType]]:
    """Get the transitions for the predicate of a function call.

    Handles both plain function names and, for higher-order functions,
    nested expressions in the function position
    (e.g. ``((negate add) 2 3)``).

    NOTE(review): this block is truncated/garbled in this copy — the
    ``if`` branch preceding the ``elif``, several string literals, and
    the assignments of ``return_type`` / ``argument_types`` are missing;
    restore from the original source before use.
    """
    transitions, function_type = self._get_transitions(expression, None)
    elif expression in self._functions:
        name = expression
        function_types = self._function_types[expression]
        if len(function_types) != 1:
            raise ParsingError(f"{expression} had multiple types; this is not yet supported for functions")
        function_type = function_types[0]
        transitions = [f]
    else:
        if isinstance(expression, str):
            raise ParsingError(f"Unrecognized function: {expression[0]}")
        else:
            raise ParsingError(f"Unsupported expression type: {expression}")
    if not isinstance(function_type, FunctionType):
        raise ParsingError(f)
    return transitions, return_type, argument_types
|
A helper method for ``_get_transitions``. This gets the transitions for the predicate
itself in a function call. If we only had simple functions (e.g., "(add 2 3)"), this would
be pretty straightforward and we wouldn't need a separate method to handle it. We split it
out into its own method because handling higher-order functions is complicated (e.g.,
something like "((negate add) 2 3)").
|
377,181
|
def scan(self, string):
    """Cheap pre-filter: return True if search(Sentence(string)) may match.

    Collects the literal words of all mandatory constraints (dropping
    words with an interior wildcard, which are not literal) and checks
    that at least one occurs in the lower-cased string.  It is often
    faster to scan before constructing a Sentence and searching it.
    """
    word_lists = (c.words for c in self.sequence if not c.optional)
    required = [
        word.strip(WILDCARD)
        for word in itertools.chain(*word_lists)
        if WILDCARD not in word[1:-1]  # interior wildcard => not a literal word
    ]
    if required:
        lowered = string.lower()
        if not any(word in lowered for word in required):
            return False
    return True
|
Returns True if search(Sentence(string)) may yield matches.
If is often faster to scan prior to creating a Sentence and searching it.
|
377,182
|
def replace(state, host, name, match, replace, flags=None):
    """Shortcut operation for replacing text in files with sed.

    + name: target remote file to edit
    + match: text/regex to match for
    + replace: text to replace with
    + flags: list of flags to pass to sed
    """
    yield sed_replace(name, match, replace, flags=flags)
|
A simple shortcut for replacing text in files with sed.
+ name: target remote file to edit
+ match: text/regex to match for
+ replace: text to replace with
+ flags: list of flags to pass to sed
|
377,183
|
async def georadius(self, name, longitude, latitude, radius, unit=None,
                    withdist=False, withcoord=False, withhash=False, count=None,
                    sort=None, store=None, store_dist=None):
    """Return members of geo set ``name`` within ``radius`` of the point
    (``longitude``, ``latitude``).

    Units: m, km, mi or ft.  ``withdist``/``withcoord``/``withhash`` add
    distance, coordinates and geohash to each result; ``count`` limits
    the number of results; ``sort`` (ASC/DESC) orders by distance;
    ``store`` writes names with the original scores into a sorted set,
    ``store_dist`` writes names scored by distance instead.

    NOTE(review): the command-name literal (first positional argument,
    presumably 'GEORADIUS') was lost in extraction.
    """
    return await self._georadiusgeneric(,
                                        name, longitude, latitude, radius,
                                        unit=unit, withdist=withdist,
                                        withcoord=withcoord, withhash=withhash,
                                        count=count, sort=sort, store=store,
                                        store_dist=store_dist)
|
Return the members of the specified key identified by the
``name`` argument which are within the borders of the area specified
with the ``latitude`` and ``longitude`` location and the maximum
distance from the center specified by the ``radius`` value.
The units must be one of the following: m, km, mi, ft. By default
``withdist`` indicates to return the distances of each place.
``withcoord`` indicates to return the latitude and longitude of
each place.
``withhash`` indicates to return the geohash string of each place.
``count`` indicates to return the number of elements up to N.
``sort`` indicates to return the places in a sorted way, ASC for
nearest to farthest and DESC for farthest to nearest.
``store`` indicates to save the places names in a sorted set named
with a specific key, each element of the destination sorted set is
populated with the score got from the original geo sorted set.
``store_dist`` indicates to save the places names in a sorted set
named with a specific key, instead of ``store`` the sorted set
destination score is set with the distance.
|
377,184
|
def create_file_combobox(self, text, choices, option, default=NoDefault,
                         tip=None, restart=False, filters=None,
                         adjust_to_contents=False,
                         default_line_edit=False):
    """Create a labeled file-path combobox with a browse button.

    choices: couples (name, key).  The combobox's line edit is
    registered in ``self.lineedits`` for config persistence and
    validated as an existing file path.

    NOTE(review): several string literals (validation message, icon
    name, button label) were lost in extraction.
    """
    combobox = FileComboBox(self, adjust_to_contents=adjust_to_contents,
                            default_line_edit=default_line_edit)
    combobox.restart_required = restart
    combobox.label_text = text
    edit = combobox.lineEdit()
    edit.label_text = text
    edit.restart_required = restart
    # map the editable line edit to its config option
    self.lineedits[edit] = (option, default)
    if tip is not None:
        combobox.setToolTip(tip)
    combobox.addItems(choices)
    msg = _()
    # the entered path must point to an existing file
    self.validate_data[edit] = (osp.isfile, msg)
    browse_btn = QPushButton(ima.icon(), , self)
    browse_btn.setToolTip(_("Select file"))
    browse_btn.clicked.connect(lambda: self.select_file(edit, filters))
    layout = QGridLayout()
    # NOTE(review): a rowSpan of 0 in addWidget is unusual — verify
    layout.addWidget(combobox, 0, 0, 0, 9)
    layout.addWidget(browse_btn, 0, 10)
    layout.setContentsMargins(0, 0, 0, 0)
    widget = QWidget(self)
    widget.combobox = combobox
    widget.browse_btn = browse_btn
    widget.setLayout(layout)
    return widget
|
choices: couples (name, key)
|
377,185
|
def address_from_public_key(pk_bytes):
    """Return the base32-encoded address ("G...") for a raw public key.

    Payload layout: one version byte (6 << 3, which encodes to 'G' in
    base32), the public-key bytes, then a little-endian CRC16 checksum
    of everything that precedes it.
    """
    payload = bytearray([6 << 3])  # version byte for public keys
    payload.extend(pk_bytes)
    checksum = struct.pack("<H", _crc16_checksum(payload))
    payload.extend(checksum)
    return base64.b32encode(payload).decode()
|
Returns the base32-encoded version of pk_bytes (G...)
|
377,186
|
def get_metrics(self):
    """Return SLOC, comment count and comment/code ratio as an OrderedDict.

    NOTE(review): the OrderedDict key literals were lost in extraction.
    """
    if(self.sloc == 0):
        if(self.comments == 0):
            # no code and no comments: define the ratio as 0
            ratio_comment_to_code = 0.00
        else:
            # comments but no code: cap the ratio at 1
            ratio_comment_to_code = 1.00
    else:
        ratio_comment_to_code = float(self.comments) / self.sloc
    metrics = OrderedDict([(, self.sloc), (, self.comments),
                           (, round(ratio_comment_to_code, 2))])
    return metrics
|
Calculate ratio_comment_to_code and return with the other values
|
377,187
|
def write_backreferences(seen_backrefs, gallery_conf,
                         target_dir, fname, snippet):
    """Write back-reference include files.

    For each module/function used by the example, appends a thumbnail
    entry to that target's "examples using ..." include file.

    NOTE(review): several string literals (config keys, file-name
    template, open modes, encoding, heading text) were lost in
    extraction.
    """
    # backreference generation is disabled
    if gallery_conf[] is None:
        return
    example_file = os.path.join(target_dir, fname)
    backrefs = scan_used_functions(example_file, gallery_conf)
    for backref in backrefs:
        include_path = os.path.join(gallery_conf[],
                                    gallery_conf[],
                                    % backref)
        # append if this file was already started during this build,
        # otherwise truncate it and write the heading first
        seen = backref in seen_backrefs
        with codecs.open(include_path, if seen else ,
                         encoding=) as ex_file:
            if not seen:
                heading = % backref
                ex_file.write(heading + )
                ex_file.write( * len(heading) + )
            ex_file.write(_thumbnail_div(target_dir, gallery_conf[],
                                         fname, snippet, is_backref=True))
        seen_backrefs.add(backref)
|
Writes down back reference files, which include a thumbnail list
of examples using a certain module
|
377,188
|
def DeregisterMountPoint(cls, mount_point):
    """Deregisters a path specification mount point.

    Args:
      mount_point (str): mount point identifier.

    Raises:
      KeyError: if the corresponding mount point is not set.
    """
    if mount_point not in cls._mount_points:
        # NOTE(review): the KeyError message literal was lost in extraction
        raise KeyError(.format(mount_point))
    del cls._mount_points[mount_point]
|
Deregisters a path specification mount point.
Args:
mount_point (str): mount point identifier.
Raises:
KeyError: if the corresponding mount point is not set.
|
377,189
|
def f_add_parameter(self, *args, **kwargs):
    """Add a parameter under the current node.

    Accepts either a ready-made parameter instance, or a name followed
    by data (optionally preceded by a custom parameter constructor).
    All arguments are forwarded unchanged to the generic add machinery;
    the current node's full name is prefixed to the parameter name.
    """
    return self._nn_interface._add_generic(self, type_name=PARAMETER,
                                           group_type_name=PARAMETER_GROUP,
                                           args=args, kwargs=kwargs)
|
Adds a parameter under the current node.
There are two ways to add a new parameter either by adding a parameter instance:
>>> new_parameter = Parameter('group1.group2.myparam', data=42, comment='Example!')
>>> traj.f_add_parameter(new_parameter)
Or by passing the values directly to the function, with the name being the first
(non-keyword!) argument:
>>> traj.f_add_parameter('group1.group2.myparam', 42, comment='Example!')
If you want to create a different parameter than the standard parameter, you can
give the constructor as the first (non-keyword!) argument followed by the name
(non-keyword!):
>>> traj.f_add_parameter(PickleParameter,'group1.group2.myparam', data=42, comment='Example!')
The full name of the current node is added as a prefix to the given parameter name.
If the current node is the trajectory the prefix `'parameters'` is added to the name.
Note, all non-keyword and keyword parameters apart from the optional constructor
are passed on as is to the constructor.
Moreover, you always should specify a default data value of a parameter,
even if you want to explore it later.
|
377,190
|
def load_file(folder_path, idx, corpus):
    """Load speaker, files, utterances and labels for recording *idx*.

    Parses the recording's XML metadata, registers the speaker on first
    encounter, adds every matching wav file as a file + utterance with
    cleaned and raw transcription label lists, and returns the new
    utterance ids.

    NOTE(review): the path templates, open mode/encoding, parser name
    and native-language literals were lost in extraction.
    """
    xml_path = os.path.join(folder_path, .format(idx))
    wav_paths = glob.glob(os.path.join(folder_path, .format(idx)))
    # no audio for this recording: nothing to import
    if len(wav_paths) == 0:
        return []
    xml_file = open(xml_path, , encoding=)
    soup = BeautifulSoup(xml_file, )
    transcription = soup.recording.cleaned_sentence.string
    transcription_raw = soup.recording.sentence.string
    gender = soup.recording.gender.string
    is_native = soup.recording.muttersprachler.string
    age_class = soup.recording.ageclass.string
    speaker_idx = soup.recording.speaker_id.string
    # register the speaker only once per corpus
    if speaker_idx not in corpus.issuers.keys():
        # the age-class string starts with the lower bound of the range
        start_age_class = int(age_class.split()[0])
        if start_age_class < 12:
            age_group = issuers.AgeGroup.CHILD
        elif start_age_class < 18:
            age_group = issuers.AgeGroup.YOUTH
        elif start_age_class < 65:
            age_group = issuers.AgeGroup.ADULT
        else:
            age_group = issuers.AgeGroup.SENIOR
        native_lang = None
        if is_native == :
            native_lang =
        issuer = issuers.Speaker(speaker_idx,
                                 gender=issuers.Gender(gender),
                                 age_group=age_group,
                                 native_language=native_lang)
        corpus.import_issuers(issuer)
    utt_ids = []
    for wav_path in wav_paths:
        wav_name = os.path.split(wav_path)[1]
        wav_idx = os.path.splitext(wav_name)[0]
        corpus.new_file(wav_path, wav_idx)
        utt = corpus.new_utterance(wav_idx, wav_idx, speaker_idx)
        # cleaned transcription
        utt.set_label_list(annotations.LabelList.create_single(
            transcription,
            idx=audiomate.corpus.LL_WORD_TRANSCRIPT
        ))
        # raw (uncleaned) transcription
        utt.set_label_list(annotations.LabelList.create_single(
            transcription_raw,
            idx=audiomate.corpus.LL_WORD_TRANSCRIPT_RAW
        ))
        utt_ids.append(wav_idx)
    return utt_ids
|
Load speaker, file, utterance, labels for the file with the given id.
|
377,191
|
def get_gene_disease(self, direct_evidence=None, inference_chemical_name=None, inference_score=None,
                     gene_name=None, gene_symbol=None, gene_id=None, disease_name=None, disease_id=None,
                     disease_definition=None, limit=None, as_df=False):
    """Query gene-disease associations, optionally filtered.

    :param str direct_evidence: direct evidence
    :param str inference_chemical_name: inference chemical name
    :param float inference_score: inference score
    :param str gene_name: gene name
    :param str gene_symbol: gene symbol
    :param int gene_id: gene identifier
    :param disease_name: disease name
    :param disease_id: disease identifier
    :param disease_definition: disease definition
    :param int limit: maximum number of results
    :param bool as_df: if True, return a ``pandas.DataFrame``
    :return: list of ``GeneDisease`` objects (or a DataFrame)
    """
    query = self.session.query(models.GeneDisease)
    # apply direct filters only for arguments that were supplied
    simple_filters = (
        (models.GeneDisease.direct_evidence, direct_evidence),
        (models.GeneDisease.inference_chemical_name, inference_chemical_name),
        (models.GeneDisease.inference_score, inference_score),
    )
    for column, value in simple_filters:
        if value:
            query = query.filter(column == value)
    # join in disease- and gene-level filters
    query = self._join_disease(query=query, disease_definition=disease_definition, disease_id=disease_id,
                               disease_name=disease_name)
    query = self._join_gene(query, gene_name=gene_name, gene_symbol=gene_symbol, gene_id=gene_id)
    return self._limit_and_df(query, limit, as_df)
|
Get gene–disease associations
:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param int gene_id: gene identifier
:param str gene_symbol: gene symbol
:param str gene_name: gene name
:param str direct_evidence: direct evidence
:param str inference_chemical_name: inference_chemical_name
:param float inference_score: inference score
:param str inference_chemical_name: chemical name
:param disease_name: disease name
:param disease_id: disease identifier
:param disease_definition: disease definition
:param int limit: maximum number of results
:return: list of :class:`pyctd.manager.database.models.GeneDisease` objects
.. seealso::
:class:`pyctd.manager.models.GeneDisease`
which is linked to:
:class:`pyctd.manager.models.Chemical`
:class:`pyctd.manager.models.Gene`
|
377,192
|
def replace(old, new):
    """Swap one element node for another in place.

    Looks up *old*'s parent and asks it to substitute *new* at the
    same position.
    """
    old.getparent().replace(old, new)
|
A simple way to replace one element node with another.
|
377,193
|
def order_vertices(self):
    """Reorder vertices so parents always have a lower index than children.

    Repeatedly sweeps the vertex list, swapping any vertex with a parent
    that currently sits at a higher index, until a sweep finds nothing
    out of order.
    """
    ordered = False
    while ordered == False:
        for i in range(len(self.vertices)):
            # NOTE(review): the flag reset sits inside the sweep in this
            # copy, so only the last vertex's check decides termination;
            # the original indentation was ambiguous — confirm upstream.
            ordered = True
            for parent in self.vertices[i].parents:
                if parent > i:
                    ordered = False
                    # swap_vertices is assumed to also fix up parent
                    # references — TODO confirm
                    self.swap_vertices(i, parent)
|
Order vertices in the graph such that parents always have a lower index than children.
|
377,194
|
def auth_user_remote_user(self, username):
    """REMOTE_USER based authentication.

    Looks up the user by username.  Unknown users are auto-registered
    when self-registration is enabled; otherwise unknown or inactive
    users fail authentication and None is returned.

    :param username: user's username for remote auth
    """
    user = self.find_user(username=username)
    if user is None:
        if not self.auth_user_registration:
            # unknown user and registration disabled: reject
            log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
            return None
        # first login with self-registration: create the account with
        # placeholder personal data
        user = self.add_user(
            username=username,
            first_name=username,
            last_name="-",
            email="-",
            role=self.find_role(self.auth_user_registration_role),
        )
    elif not user.is_active:
        # known but deactivated account: reject
        log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
        return None
    self.update_user_auth_stat(user)
    return user
|
REMOTE_USER user Authentication
:param username: user's username for remote auth
:type self: User model
|
377,195
|
def zip_file(fn, mode="r"):
    """Open *fn* as an archive.

    Returns an ExplodedZipFile when *fn* is a directory, a
    zipfile.ZipFile when it is a valid zip file, and raises otherwise.
    """
    if isdir(fn):
        return ExplodedZipFile(fn)
    if is_zipfile(fn):
        return ZipFile(fn, mode)
    raise Exception("cannot treat as an archive: %r" % fn)
|
returns either a zipfile.ZipFile instance or an ExplodedZipFile
instance, depending on whether fn is the name of a valid zip file,
or a directory.
|
377,196
|
def next_except_jump(self, start):
    """Return the next jump generated by an ``except SomeException:``
    construct in a try/except/else clause, or None if not found.
    """
    if self.code[start] == self.opc.DUP_TOP:
        # "except <Exc>:" compiles to DUP_TOP ... POP_JUMP_IF_FALSE; the
        # instruction before the jump target ends the handler
        except_match = self.first_instr(start, len(self.code), self.opc.POP_JUMP_IF_FALSE)
        if except_match:
            jmp = self.prev_op[self.get_target(except_match)]
            self.ignore_if.add(except_match)
            self.not_continue.add(jmp)
            return jmp
    # otherwise scan forward for the END_FINALLY matching *this* handler,
    # balancing nested SETUP_* / END_FINALLY pairs along the way
    count_END_FINALLY = 0
    count_SETUP_ = 0
    for i in self.op_range(start, len(self.code)):
        op = self.code[i]
        if op == self.opc.END_FINALLY:
            if count_END_FINALLY == count_SETUP_:
                # the op right before the matching END_FINALLY must leave
                # the handler via a jump or a return
                assert self.code[self.prev_op[i]] in frozenset([self.opc.JUMP_ABSOLUTE,
                                                                self.opc.JUMP_FORWARD,
                                                                self.opc.RETURN_VALUE])
                self.not_continue.add(self.prev_op[i])
                return self.prev_op[i]
            count_END_FINALLY += 1
        elif op in self.setup_opts_no_loop:
            count_SETUP_ += 1
|
Return the next jump that was generated by an except SomeException:
construct in a try...except...else clause or None if not found.
|
377,197
|
def request_data(key, url, file, string_content, start, end, fix_apple):
    """Request data, update the local cache and dequeue this request.

    :param key: key for the data source, used to fetch the result later
    :param url: iCal URL
    :param file: iCal file path
    :param string_content: iCal content as string
    :param start: start date
    :param end: end date
    :param fix_apple: fix known Apple iCal issues
    """
    data = []
    try:
        data += events(url=url, file=file, string_content=string_content,
                       start=start, end=end, fix_apple=fix_apple)
    finally:
        # always publish whatever was collected (possibly nothing) and
        # mark the request finished, even when fetching raised
        update_events(key, data)
        request_finished(key)
|
Request data, update local data cache and remove this Thread form queue.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues
|
377,198
|
def get_stp_mst_detail_output_msti_port_configured_root_guard(self, **kwargs):
    """Auto-generated NETCONF binding.

    Builds the get-stp-mst-detail RPC output filter for
    msti/port/configured-root-guard and passes it to the callback
    (``self._callback`` unless overridden via kwargs).

    NOTE(review): the ``kwargs.pop`` key literals were lost in
    extraction.
    """
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    # the RPC element itself becomes the payload root
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop()
    port = ET.SubElement(msti, "port")
    configured_root_guard = ET.SubElement(port, "configured-root-guard")
    configured_root_guard.text = kwargs.pop()
    callback = kwargs.pop(, self._callback)
    return callback(config)
|
Auto Generated Code
|
377,199
|
def get_compounds(identifier, namespace=, searchtype=None, as_dataframe=False, **kwargs):
    """Retrieve the specified compound records from PubChem.

    :param identifier: The compound identifier to use as a search query.
    :param namespace: (optional) identifier type: cid, name, smiles,
        sdf, inchi, inchikey or formula.
    :param searchtype: (optional) advanced search type: substructure,
        superstructure or similarity.
    :param as_dataframe: (optional) return the Compound properties as a
        pandas DataFrame instead of a list.

    NOTE(review): the namespace default and the results key literal were
    lost in extraction.
    """
    results = get_json(identifier, namespace, searchtype=searchtype, **kwargs)
    compounds = [Compound(r) for r in results[]] if results else []
    if as_dataframe:
        return compounds_to_frame(compounds)
    return compounds
|
Retrieve the specified compound records from PubChem.
:param identifier: The compound identifier to use as a search query.
:param namespace: (optional) The identifier type, one of cid, name, smiles, sdf, inchi, inchikey or formula.
:param searchtype: (optional) The advanced search type, one of substructure, superstructure or similarity.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Compound` properties into a pandas
:class:`~pandas.DataFrame` and return that.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.